repo_name | path | copies | size | content | license
---|---|---|---|---|---|
GaZ3ll3/scikit-image | doc/examples/plot_denoise.py | 17 | 2078 | """
====================
Denoising a picture
====================
In this example, we denoise a noisy version of the picture of the astronaut
Eileen Collins using the total variation and bilateral denoising filter.
These algorithms typically produce "posterized" images with flat domains
separated by sharp edges. It is possible to change the degree of posterization
by controlling the tradeoff between denoising and faithfulness to the original
image.
Total variation filter
----------------------
The result of this filter is an image that has a minimal total variation norm,
while being as close to the initial image as possible. The total variation is
the L1 norm of the gradient of the image.
Bilateral filter
----------------
A bilateral filter is an edge-preserving and noise reducing filter. It averages
pixels based on their spatial closeness and radiometric similarity.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data, img_as_float
from skimage.restoration import denoise_tv_chambolle, denoise_bilateral
astro = img_as_float(data.astronaut())
astro = astro[220:300, 220:320]
noisy = astro + 0.6 * astro.std() * np.random.random(astro.shape)
noisy = np.clip(noisy, 0, 1)
fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(8, 5))
plt.gray()
ax[0, 0].imshow(noisy)
ax[0, 0].axis('off')
ax[0, 0].set_title('noisy')
ax[0, 1].imshow(denoise_tv_chambolle(noisy, weight=0.1, multichannel=True))
ax[0, 1].axis('off')
ax[0, 1].set_title('TV')
ax[0, 2].imshow(denoise_bilateral(noisy, sigma_range=0.05, sigma_spatial=15))
ax[0, 2].axis('off')
ax[0, 2].set_title('Bilateral')
ax[1, 0].imshow(denoise_tv_chambolle(noisy, weight=0.2, multichannel=True))
ax[1, 0].axis('off')
ax[1, 0].set_title('(more) TV')
ax[1, 1].imshow(denoise_bilateral(noisy, sigma_range=0.1, sigma_spatial=15))
ax[1, 1].axis('off')
ax[1, 1].set_title('(more) Bilateral')
ax[1, 2].imshow(astro)
ax[1, 2].axis('off')
ax[1, 2].set_title('original')
fig.subplots_adjust(wspace=0.02, hspace=0.2,
top=0.9, bottom=0.05, left=0, right=1)
plt.show()
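# --- Illustrative sketch (not part of the original example) ---
# The ``weight`` parameter of ``denoise_tv_chambolle`` controls the
# denoising/faithfulness trade-off described above: a larger weight gives a
# flatter, more "posterized" result, i.e. a smaller total variation.  A rough
# check, reusing the ``noisy`` image defined earlier:
def total_variation(img):
    # L1 norm of the discrete gradient, summed over both image axes
    return (np.abs(np.diff(img, axis=0)).sum() +
            np.abs(np.diff(img, axis=1)).sum())

for w in (0.05, 0.1, 0.2):
    denoised = denoise_tv_chambolle(noisy, weight=w, multichannel=True)
    print('weight=%.2f  total variation=%.1f' % (w, total_variation(denoised)))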
| bsd-3-clause |
alvarofierroclavero/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs need cblas; a fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be built
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
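# Illustrative usage note (not part of the original file): this configuration
# is normally driven by scikit-learn's top-level setup.py; the compiled
# extensions it declares can be built in place with, e.g.,
#
#     python setup.py build_ext --inplace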
| bsd-3-clause |
rdeits/Identify-Polygons | ga/fitness.py | 2 | 9070 | from __future__ import division
import numpy as np
# from scipy import stats, weave
import Image
import matplotlib.pyplot as plt
import csv
import time
import os
from matplotlib.nxutils import pnpoly
def orthogonal_regression(x,y, calc_residue = False):
"""Given two arrays, x and y, perform orthogonal regression on them, as
described in http://en.wikipedia.org/wiki/Deming_regression. Returns
[slope, intercept, total residue]"""
n = len(x)
x_bar = np.mean(x)
y_bar = np.mean(y)
s_xx = 1/(n-1) * np.sum(np.power(x-x_bar,2))
s_xy = 1/(n-1) * np.sum(np.multiply((x-x_bar),(y-y_bar)))
s_yy = 1/(n-1) * np.sum(np.power(y-y_bar,2))
beta1 = ((s_yy - s_xx + np.sqrt(np.power(s_yy-s_xx,2)+4*np.power(s_xy,2)))/
(2*s_xy))
beta0 = y_bar - beta1*x_bar
a = -beta1
b = 1
c = -beta0
# print "slope:", beta1
# print "intercept:", beta0
# print "residue:",residue
if calc_residue:
residue = np.sum(np.divide(np.abs(a*x + b*y + c),np.sqrt(a**2 + b**2)))
return beta1, beta0, residue
else:
return beta1, beta0
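def _demo_orthogonal_regression():
    """Illustrative sketch (not part of the original module): Deming
    regression treats noise in x and y symmetrically, unlike ordinary least
    squares, so the slope of a noisy line should be recovered without the
    attenuation bias of a y-on-x fit."""
    x = np.linspace(0, 10, 50)
    y = 2.0 * x + 1.0 + 0.1 * np.random.randn(50)
    slope, intercept, residue = orthogonal_regression(x, y, calc_residue=True)
    print "recovered slope %.3f, intercept %.3f, residue %.3f" % (
        slope, intercept, residue)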
def calculate_residue(x, y, A, B):
"""Given the x and y coordinates of a set of points, calculate their
distance from the line segment from point A to point B. For each point in
the x and y lists, if that point is between A and B, then return its
orthogonal distance to the line AB. Otherwise, return its distance to the
closer of point A and B"""
x = np.array(x)
y = np.array(y)
u = ((x - A[0]) * (B[0] - A[0]) + (y - A[1]) * (B[1]-A[1])) /\
((B[0] - A[0])**2 + (B[1] - A[1])**2)
for i in range(len(u)):
if u[i] < 0:
u[i] = 0
elif u[i] > 1:
u[i] = 1
Cx = A[0] + u * (B[0] - A[0])
Cy = A[1] + u * (B[1] - A[1])
return np.sum(np.sqrt((x - Cx)**2 + (y - Cy)**2))
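def _demo_calculate_residue():
    """Illustrative sketch (not part of the original module): a point on the
    segment from (0, 0) to (1, 0) contributes zero, while a point beyond an
    endpoint contributes its distance to that endpoint, so the total here is
    0.0 + 1.0 = 1.0."""
    return calculate_residue([0.5, 2.0], [0.0, 0.0], (0.0, 0.0), (1.0, 0.0))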
class PolygonTester:
"""A class based on the FitnessFunction class from ga.py to be used for
polygon fitting. It takes a filename and a number of sides and tests
polygon fits against the data in the file. The file can either be a CSV
file of (x, y) pairs of points approximately defining the edges of the
polygon or a PNG image, in which any point with non-zero red value is
considered a candidate for part of the edge of the polygon.
When doing optimization, the PolygonTester's __call__ expects a list of
length num_vars specifiying the indices of four points in its data set,
sorted by their angle relative to their centroid. It returns the total
orthogonal distance of all points from the polygon calculated using those
four indices for orthogonal regression."""
def __init__(self, input_data, num_vars, residue_method = 'segment'):
self.num_vars = num_vars
self.residue_method = residue_method
self.load_data(input_data)
self.calculate_angles()
self.lb = [0]*num_vars
self.ub = [len(self.data)]*num_vars
self.sort_data()
assert self.residue_method == 'segment' or self.residue_method == 'line'
def load_data(self,input_data):
if isinstance(input_data, str):
extension = input_data.split(os.path.extsep)[-1]
assert extension == 'png' or extension == 'csv', "Data must be png or csv"
if extension == 'png':
self.data_type = "image"
self.image = Image.open(input_data)
(self.width,self.height) = self.image.size
self.image = self.image.convert('RGB')
self.image_data = np.resize(self.image.getdata(),(self.height,self.width,3))
self.data = []
# f = open('data.csv','wb')
# csv_writer = csv.writer(f)
for x in range(self.width):
for y in range(self.height):
if self.image_data[y][x][0] > 0:
# csv_writer.writerow([x,y])
self.data.append([x,y])
# f.close()
else:
self.data_type = "csv"
csv_reader = csv.reader(open(input_data,'rb'))
self.data = [[float(row[0]),float(row[1])] for row in csv_reader]
else:
self.data_type = "array"
self.data = input_data
self.x_list = np.array([el[0] for el in self.data])
self.y_list = np.array([el[1] for el in self.data])
self.x_range = [min(self.x_list),max(self.x_list)]
self.y_range = [min(self.y_list),max(self.y_list)]
if self.data_type == "csv":
self.width = self.x_range[1]-self.x_range[0]
self.height = self.y_range[1] - self.y_range[0]
self.centroid = (sum(self.x_list)/len(self.x_list),
sum(self.y_list)/len(self.y_list))
# print "centroid:",self.centroid
def calculate_angles(self):
self.angles = np.zeros(len(self.data))
for i,[x,y] in enumerate(self.data):
if x == self.centroid[0]:
if y > self.centroid[1]:
self.angles[i] = np.pi/2
else:
self.angles[i] = np.pi*3/2
else:
self.angles[i] = np.arctan((y-self.centroid[1])/(x-self.centroid[0]))
if x < self.centroid[0]:
self.angles[i] += np.pi
self.angles[i] %= (2*np.pi)
def sort_data(self):
# print np.argsort(self.angles)
# self.data = self.data[np.argsort(self.angles)]
# self.x_list = np.array([el[0] for el in self.data])
# self.y_list = np.array([el[1] for el in self.data])
self.x_list = self.x_list[np.argsort(self.angles)]
self.y_list = self.y_list[np.argsort(self.angles)]
def __call__(self,indices):
"""Divide the data points up into num_vars bins at the points specified
in indices. Then perform orthogonal regression on all the points in
each bin and return the total error. Thus, each bin of points is
considered to be a candidate for the collection of all the points along
a given side of the polygon."""
self.sane = True
self.generate_polygon(indices)
self.sane = self.sane and pnpoly(self.centroid[0], self.centroid[1], self.corners)
if not self.sane:
return 1e308
else:
return self.error
def generate_polygon(self, indices):
slopes = []
intercepts = []
self.corners = []
self.error = 0
self.x_bins = []
self.y_bins = []
for i, t0 in enumerate(indices):
t1 = indices[(i+1)%len(indices)]
if t0 < t1:
x_bin = self.x_list[t0:t1]
y_bin = self.y_list[t0:t1]
else:
x_bin = np.hstack((self.x_list[t0:],self.x_list[:t1]))
y_bin = np.hstack((self.y_list[t0:],self.y_list[:t1]))
if len(x_bin) > 1:
if self.residue_method == 'line':
m, b, residue = orthogonal_regression(x_bin, y_bin, True)
self.error += residue
else:
m, b = orthogonal_regression(x_bin, y_bin, False)
slopes.append(m)
intercepts.append(b)
else:
m = b = 0
# print "no points between",t0,'and',t1, ", killing..."
self.sane = False
self.x_bins.append(x_bin)
self.y_bins.append(y_bin)
for i in range(len(slopes)):
m0 = slopes[i]
b0 = intercepts[i]
m1 = slopes[(i+1)%len(slopes)]
b1 = intercepts[(i+1)%len(slopes)]
x = (b1-b0)/(m0-m1)
self.corners.append([x, m0*x+b0])
if self.residue_method == 'segment':
for i in range(len(self.corners)):
self.error += calculate_residue(self.x_bins[i], self.y_bins[i],
self.corners[i-1], self.corners[i])
return self.corners
def plot_estimate(self, indices):
# plt.figure()
plt.hold(True)
if self.data_type == "image":
x = range(self.width)
y = range(self.height)
X, Y = np.meshgrid(x,y)
plt.contourf(X,Y,self.image_data[:,:,0])
else:
plt.plot(self.x_list,self.y_list,'bo')
self.generate_polygon(indices)
plt.plot([self.corners[i][0] for i in range(-1,len(self.corners))],
[self.corners[i][1] for i in range(-1,len(self.corners))], 'r-')
plt.xlim(self.x_range)
plt.ylim(self.y_range)
plt.show()
if self.data_type == "csv":
f = open("corners.csv",'wb')
csv_writer = csv.writer(f)
for point in self.corners:
csv_writer.writerow(point)
f.close()
if __name__ == "__main__":
tester = PolygonTester('sonar_data.csv',4)
print tester([0,4,8,12])
| mit |
HIPS/optofit | optofit/plotting/plotting.py | 1 | 6666 | """
General code for plotting the state of the neuron
"""
from optofit.utils.utils import extract_names_from_dtype, get_item_at_path, sz_dtype, as_matrix
from optofit.observation.observable import *
import matplotlib.pyplot as plt
import numpy as np
def plot_latent_compartment_state(t, z, state, compartment, axs=None, colors=['k'], linewidth=1):
dtype = compartment.latent_dtype
lb = compartment.latent_lb
ub = compartment.latent_ub
D = sz_dtype(dtype)
z_comp = get_item_at_path(z, compartment.path)
z = as_matrix(z_comp, D)
z_names = extract_names_from_dtype(dtype)
# Compute the channel currents
s_comp = get_item_at_path(state, compartment.path)
N_ch = len(compartment.channels)
Is = [s_comp[ch.name]['I'] for ch in compartment.channels]
# if fig is None:
# fig,axs = plt.subplots(D,1)
# else:
# axs = fig.get_axes()
# # Make sure axs is a list of axes, even if it is length 1
# if not isinstance(axs, (list, np.ndarray)):
# axs = [axs]
if axs is None:
axs = []
for d in np.arange(D):
ax = plt.subplot2grid((D,3), (d,0), colspan=2)
axs.append(ax)
ax = plt.subplot2grid((D,3), (0,2), rowspan=D)
axs.append(ax)
for d in np.arange(D):
axs[d].plot(t, z[d,:], color=colors[d % len(colors)], linewidth=linewidth)
axs[d].set_ylabel(z_names[d])
yl = list(axs[d].get_ylim())
if np.isfinite(lb[d]):
yl[0] = lb[d]
if np.isfinite(ub[d]):
yl[1] = ub[d]
axs[d].set_ylim(yl)
# Plot the channel densities
C = len(compartment.channels)
gs = [ch.g.value for ch in compartment.channels]
names = [ch.name for ch in compartment.channels]
axs[-1].bar(np.arange(C), gs, facecolor=colors[0], alpha=0.5)
axs[-1].set_xticks(np.arange(C))
axs[-1].set_xticklabels(map(lambda n: '$g_%s$' % n, names))
axs[-1].set_title('Channel densities')
axs[-1].set_ylim([0,30])
# if not fig_given:
# plt.show()
return axs
def plot_latent_compartment_V_and_I(t, data_sequence, compartment, observation,
axs=None, colors=['k'], linewidth=1):
Z = data_sequence.latent
S = data_sequence.states
O = data_sequence.observations
V = get_item_at_path(Z, compartment.path)['V']
Ca = get_item_at_path(Z, compartment.path)['[Ca]']
if isinstance(observation, NewDirectCompartmentVoltage):
F = get_item_at_path(O, observation.path)['V']
if isinstance(observation, LowPassCompartmentVoltage):
F = get_item_at_path(O, observation.path)['V']
elif isinstance(observation, LinearFluorescence):
F = get_item_at_path(O, observation.path)['Flr']
else:
F = None
# Check for inputs
I = np.zeros_like(t)
try:
I = get_item_at_path(data_sequence.input, compartment.path)['I']
except:
# No input current
pass
try:
I = get_item_at_path(data_sequence.input, compartment.path)['Irr']
except:
# No input irradiance
pass
# Compute the channel currents
s_comp = get_item_at_path(S, compartment.path)
# Num rows = N_ch (one per channel current) +
# 1 (input) + 1 (voltage) + 1 (calcium)
D = len(compartment.channels) + 3
# Set the relative width of the time series to the conductances
r = 3
if axs is None:
axs = []
for d in np.arange(D):
ax = plt.subplot2grid((D,r+1), (d,0), colspan=r)
axs.append(ax)
# Add one more axis for the concentrations
ax = plt.subplot2grid((D,r+1), (0,r), rowspan=D)
axs.append(ax)
# Plot the input current
axs[0].plot(t, I, color='b', lw=linewidth)
axs[0].set_ylabel('$I_{%s}$' % compartment.name)
# Plot the voltage
axs[1].plot(t, V, color=colors[0], lw=linewidth)
axs[1].set_ylabel('$V_{%s}$' % compartment.name)
# Plot the calcium
axs[2].plot(t, Ca, color=colors[0], lw=linewidth)
axs[2].set_ylabel('$[Ca]_{%s}$' % compartment.name)
if F is not None:
axs[1].plot(t, F, color='b', lw=linewidth)
for i,ch in enumerate(compartment.channels):
I = s_comp[ch.name]['I']
axs[i+3].plot(t, I, color=colors[i % len(colors)], linewidth=linewidth)
axs[i+3].set_ylabel('$I_{%s}$' % ch.name)
# Plot the channel densities
C = len(compartment.channels)
gs = [ch.g.value for ch in compartment.channels]
names = [ch.name for ch in compartment.channels]
axs[-1].bar(np.arange(C), gs, facecolor=colors[0], alpha=0.5)
axs[-1].set_xticks(0.5+np.arange(C))
axs[-1].set_xticklabels(map(lambda n: '$g_{%s}$' % n, names))
axs[-1].set_title('Channel densities')
return axs
def plot_latent_state(t, z, dtype, fig=None, colors=['k'], linewidth=1):
D,T = z.shape
z_names = extract_names_from_dtype(dtype)
plt.ion()
fig_given = fig is not None
if not fig_given:
fig,axs = plt.subplots(D,1)
else:
axs = fig.get_axes()
# Make sure axs is a list of axes, even if it is length 1
if not isinstance(axs, list):
axs = [axs]
for d in np.arange(D):
axs[d].plot(t, z[d,:], color=colors[d % len(colors)], linewidth=linewidth)
axs[d].set_ylabel(z_names[d])
if not fig_given:
plt.show()
def plot_latent_currents(t, z, neuron, inpt, fig=None, colors=['k']):
state = neuron.evaluate_state(z, inpt)
T = z.size
# Get the latent currents
I_names = []
gs = np.array([])
Is = np.zeros((T,0))
for c in neuron.compartments:
# Get the sub-structured arrays for this comp
chs = c.channels
# Now compute the per channel currents in this compartment
for ch in chs:
if state[c.name][ch.name].dtype.names is not None and \
'I' in state[c.name][ch.name].dtype.names:
gs = np.concatenate((gs, [ch.g.value]))
Is = np.concatenate((Is, -1.0*state[c.name][ch.name]['I'][:,np.newaxis]),
axis=1)
I_names.append('I_'+ch.name)
C = Is.shape[1]
fig_given = fig is not None
if not fig_given:
fig,axs = plt.subplots(C,1)
else:
axs = fig.get_axes()
# Make sure axs is a list of axes, even if it is length 1
# if not isinstance(axs, np.ndarray):
# axs = np.array([axs])
for c in np.arange(C):
axs[c].plot(t, Is[:,c], color=colors[c % len(colors)], linewidth=2)
axs[c].set_ylabel(I_names[c])
if not fig_given:
plt.show()
return fig, axs
| gpl-2.0 |
aldebaran1/gsit | gsit/pyRinex.py | 1 | 14019 | #!/usr/bin/env
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 15 14:15:16 2016
@author of the file: smrak
Autrors of the code: Greg Starr and Michael Hirsch
"""
from __future__ import division,absolute_import,print_function
import numpy as np
from datetime import datetime
from pandas import Panel4D, DataFrame, Series
from pandas.io.pytables import read_hdf
from os.path import splitext,expanduser
from io import BytesIO
from os.path import getsize
import os
import yaml
import glob
from operator import add
def writeRinexObsHeader2yaml(fname):
"""
Sebastijan Mrak
Function takes a RINEX observation file name, reads the file's header and
saves it to a yaml file with the same base name.
"""
header = readRinexObsHeader(fname)
filename = splitext(expanduser(fname))
yaml_fn = filename[0] + '.yaml'
with open(yaml_fn, 'w') as outfile:
yaml.dump(header, outfile, default_flow_style=True)
def writeRinexObsHeaders2yaml(folder):
"""
Sebastijan Mrak
Function takes a folder with RINEX observation files and finds all files
with an observation ('.1*o') extension. Then it iterates through all files,
reads each header and saves it to a yaml file with the same name.
"""
ext = '*.1*o'
flist = sorted(glob.glob(folder+ext))
for doc in flist:
#print doc
header = readRinexObsHeader(doc)
filename = splitext(expanduser(doc))
yaml_fn = filename[0] + '.yaml'
if not os.path.isfile(yaml_fn):
print ('converted: ', yaml_fn)
with open(yaml_fn, 'w') as outfile:
yaml.dump(header, outfile, default_flow_style=True)
else:
print ('File exists: ', yaml_fn)
def readRinexNav(rinex_nav_filename):
"""
Michael Hirsch
It may actually be faster to read the entire file via f.read() and then .split()
and asarray().reshape() to the final result, but I did it frame by frame.
http://gage14.upc.es/gLAB/HTML/GPS_Navigation_Rinex_v2.11.html
"""
startcol = 3 #column where numerical data starts
nfloat = 19 #number of text elements per float data number
nline = 7 #number of lines per record
with open(expanduser(rinex_nav_filename),'r') as f:
#find end of header, which has non-constant length
while True:
if 'END OF HEADER' in f.readline(): break
#handle frame by frame
sv = []; epoch=[]; raws=''
while True:
headln = f.readline()
if not headln: break
#handle the header
sv.append(headln[:2])
year = int(headln[2:5])
if (80 <= year <= 99):
year+=1900
elif (year < 80): #good till year 2180
year+=2000
epoch.append(datetime(year = year,
month = int(headln[5:8]),
day = int(headln[8:11]),
hour = int(headln[11:14]),
minute = int(headln[14:17]),
second = int(headln[17:20]),
microsecond = int(headln[21])*100000))
"""
now get the data.
Use rstrip() to chomp newlines consistently on Windows and Python 2.7/3.4
Specifically [:-1] doesn't work consistently as .rstrip() does here.
"""
raw = (headln[22:].rstrip() +
' '.join(f.readline()[startcol:].rstrip() for _ in range(nline-1))
+f.readline()[startcol:40].rstrip())
raws += raw + '\n'
#print (raws)
raws = raws.replace('D', 'E')
raws = raws.replace(' -', '-')
raws = raws.replace(' ', ' ')
aa = raws.split('\n')
#print (type(aa))
for i in range(len(aa)):
if len(aa[i]) == 532:
aa[i] = aa[i] +'0.000000000000E+00'
strio = BytesIO('\n'.join(aa).encode())
darr = np.genfromtxt(strio,delimiter=nfloat)
nav= DataFrame(darr, epoch,
['SVclockBias','SVclockDrift','SVclockDriftRate','IODE',
'Crs','DeltaN','M0','Cuc',
'Eccentricity','Cus','sqrtA','TimeEph',
'Cic','OMEGA','CIS','Io',
'Crc','omega','OMEGA DOT','IDOT',
'CodesL2','GPSWeek','L2Pflag','SVacc',
'SVhealth','TGD','IODC', 'TransTime',
'FitIntvl'])
nav['sv'] = Series(np.asarray(sv,int), index=nav.index)
return nav
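def _example_read_nav(nav_fname='brdc0010.15n'):
    """Illustrative sketch (not part of the original module); the file name
    above is a placeholder.  readRinexNav returns a pandas DataFrame indexed
    by epoch, one row per broadcast ephemeris record, with the satellite
    number stored in the 'sv' column."""
    nav = readRinexNav(nav_fname)
    print('satellites in file:', sorted(set(nav['sv'])))
    return nav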
def writeRinexObs2Hdf(rinex_obs_file_name, odir=None):
"""
Function writeRinexObs2Hdf takes a RINEX observation data file (.15o) and
writes the observation data into a new hdf (.h5) file in the same folder
as the original RINEX observation data file.
Code is restructured after Greg Starr's rinexObs function
"""
filename,ext = splitext(expanduser(rinex_obs_file_name))
with open(rinex_obs_file_name,'r') as f:
lines = f.read().splitlines(True)
lines.append('')
header,version,headlines,obstimes,sats,svset = scan(lines)
data = processBlocks(lines,header,obstimes,svset,headlines,sats)
h5fn = filename + '.h5'
if odir is not None:
h5fn = odir
data.to_hdf(h5fn,key='data',mode='w',format='table')
print('Write successful. \n {} is a RINEX {} file, {} kB.'.format(
rinex_obs_file_name,version,getsize(rinex_obs_file_name)/1000.0))
def readRinexObsHeader(obs_file_name):
with open(obs_file_name, 'r') as f:
lines = f.read().splitlines(True)
lines.append('')
header = scanHeader(lines)
return header
def readRinexObsHdf(hdf5_file_name):
"""
Function readRinexObsHdf opens the input .h5 file with raw data structured
in an hdf file. Besides the restructured observation data in a
pandas.Panel4D, the function finds the original observation RINEX data with
the .15o extension, which has to be in the same folder as the hdf file, and
reads its header.
The function's output is thus the header data structured as a dictionary and
the observation data structured as a pandas.Panel4D.
Code is restructured after Greg Starr's rinexObs function
"""
filename, ext = splitext(expanduser(hdf5_file_name))
obs_data_ext = '.15o'
obs_header_file_name = filename + obs_data_ext
with open(obs_header_file_name,'r') as f:
lines = f.read().splitlines(True)
lines.append('')
header,version,headlines,obstimes,sats,svset = scan(lines)
data = read_hdf(hdf5_file_name,key='data')
return header, data, list(svset), obstimes
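def _example_obs_roundtrip(obs_fname='station0010.15o'):
    """Illustrative sketch (not part of the original module); the file name
    above is a placeholder.  Convert a RINEX observation file to HDF5 with
    writeRinexObs2Hdf, then read it back; the .15o file must stay next to
    the .h5 file because readRinexObsHdf re-reads its header."""
    writeRinexObs2Hdf(obs_fname)
    h5_fname = splitext(expanduser(obs_fname))[0] + '.h5'
    header, data, svset, obstimes = readRinexObsHdf(h5_fname)
    print('observed satellites:', svset)
    return header, data, svset, obstimes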
def scanHeader(lines):
header={}
eoh=0
for i,line in enumerate(lines):
if "END OF HEADER" in line:
eoh=i
break
if line[60:].strip() not in header:
header[line[60:].strip()] = line[:60].strip()
else:
header[line[60:].strip()] += " "+line[:60].strip()
try:
verRinex = float(header['RINEX VERSION / TYPE'].split()[0])
header['APPROX POSITION XYZ'] = [float(i) for i in header[
'APPROX POSITION XYZ'].split()]
header['# / TYPES OF OBSERV'] = header['# / TYPES OF OBSERV'].split()
header['# / TYPES OF OBSERV'][0] = int(header['# / TYPES OF OBSERV'][0])
header['INTERVAL'] = float(header['INTERVAL'])
except:
pass
return header
def scan(lines):
"""
Written by Greg Starr
This function sets up the rinex file parsing by quickly running through
the file, looking for the line at which each time block starts, the time
of each block, the satellites in view at each time, and overall what
satellites are in the rinex file
inputs:
lines - list containing each line in the rinex file as a string
outputs:
header - all the header info in a dictionary
verRinex - the rinex file's version
headlines - a list of ints, the index of lines where each time block
starts
obstimes - list of times corresponding to each block, same length as
headlines
sats - the satellites in view at each time, should be same length
as headlines
svset - the set of all the satellites in the rinex file
"""
header={}
eoh=0
for i,line in enumerate(lines):
if "END OF HEADER" in line:
eoh=i
break
if line[60:].strip() not in header:
header[line[60:].strip()] = line[:60].strip()
else:
header[line[60:].strip()] += " "+line[:60].strip()
try:
verRinex = float(header['RINEX VERSION / TYPE'].split()[0])
header['APPROX POSITION XYZ'] = [float(i) for i in header[
'APPROX POSITION XYZ'].split()]
header['# / TYPES OF OBSERV'] = header['# / TYPES OF OBSERV'].split()
header['# / TYPES OF OBSERV'][0] = int(header['# / TYPES OF OBSERV'][0])
header['INTERVAL'] = float(header['INTERVAL'])
except:
pass
headlines=[]
obstimes=[]
sats=[]
svset=set()
indicator=[]
i = eoh + 1
while True:
if not lines[i]: break
if len(lines[i]) < 28:
i+=1
# print ('In the loop')
# print (i, len(lines[i]))
if not int(lines[i][28]):
#no flag or flag=0
headlines.append(i)
obstimes.append(_obstime([lines[i][1:3],lines[i][4:6],
lines[i][7:9],lines[i][10:12],
lines[i][13:15],lines[i][16:18],lines[i][19:25]]))
numsvs = int(lines[i][30:32])
if(numsvs > 12):
indicator=[]
sat_numbers=[]
for s in range(numsvs):
if (s == 12 or s==24):
i += 1
line = lines[i][32:]
indicator.append(line[0+(s%12)*3])
sat_numbers.append(int(lines[i][33+(s%12)*3:35+(s%12)*3]))
# GALILEO satellites enumerated 60-
indicator1 = [w.replace('E', '60') for w in indicator]
# GLONASS satellites enumerated 32-
indicator1 = [w.replace('R', '32') for w in indicator1]
# GPS satellites enumerated 0-32
indicator1 = [w.replace('G', '0') for w in indicator1]
constant = np.array(list(map(int, indicator1)))
sat_numbers = np.array(sat_numbers)
out = constant + sat_numbers
sats.append(out)
else:
sats.append([int(lines[i][33+s*3:35+s*3]) for s in range(numsvs)])
i+=numsvs*int(np.ceil(header['# / TYPES OF OBSERV'][0]/5))+1
else:
#there was a comment or some header info
flag=int(lines[i][28])
if(flag!=4):
print(flag)
skip=int(lines[i][30:32])
i+=skip+1
for sv in sats:
svset = svset.union(set(sv))
print ('Finished with scanning lines')
return header,verRinex,headlines,obstimes,sats,svset
def _obstime(fol):
"""
Written by Greg Starr
turns a listed date collected from the rinex file into a datetime,
this is just a utility function.
"""
year = int(fol[0])
if (80 <= year <= 99):
year+=1900
elif (year < 80): #because we might pass in four-digit year
year+=2000
return datetime(year = year, month = int(fol[1]), day = int(fol[2]),
hour = int(fol[3]), minute = int(fol[4]),
second = int(float(fol[5])),
microsecond = int(fol[6])
)
def _block2df(block,obstypes,svnames,svnum):
"""
input: block of text corresponding to one time increment INTERVAL of RINEX file
output: 2-D array of float64 data from block.
Future: consider whether best to use Numpy, Pandas, or Xray.
"""
nobs = len(obstypes)
stride = 3
strio = BytesIO(block.encode())
barr = np.genfromtxt(strio, delimiter=(14,1,1)*5).reshape((svnum,-1),
order='C')
data = barr[:,0:nobs*stride:stride]
lli = barr[:,1:nobs*stride:stride]
ssi = barr[:,2:nobs*stride:stride]
data = np.vstack(([data],[lli],[ssi])).T
return data
def processBlocks(lines,header,obstimes,svset,headlines,sats):
"""
turns the rinex file and the info from scan() into a Panel4D
inputs:
the info from scan(), see scan() above
outputs:
blocks - the Panel4D with all the data, see above for organization
"""
obstypes = header['# / TYPES OF OBSERV'][1:]
blocks = np.nan*np.ones((len(obstypes),max(svset)+1,len(obstimes),3))
for i in range(len(headlines)):
linesinblock = len(sats[i])*int(np.ceil(header['# / TYPES OF OBSERV'][0]/5))
block = ''.join(lines[headlines[i]+1+int(len(sats[i])/13):headlines[i]+linesinblock+1+int(len(sats[i])/13)])
bdf = _block2df(block,obstypes,sats[i],len(sats[i]))
blocks[:,np.asarray(sats[i],int),i,:] = bdf
#print (blocks)
"""
it is way faster to turn a big numpy array into a Panel4D than
to make the Panel4D first and assign it one cell at a time,
Panel4Ds are slow, it is best to use numpy when possible
"""
blocks = Panel4D(blocks,
labels=obstypes,
items=np.arange(max(svset)+1),
major_axis=obstimes,
minor_axis=['data','lli','ssi'])
blocks = blocks[:,list(svset),:,:]
return blocks
| mit |
eddiemonroe/opencog | opencog/embodiment/Monitor/monitor_widget.py | 17 | 6067 | #
# Widgets, each of which is in charge of a mind agent within OAC.
# The main job of a widget is getting data from Plaza within OAC and drawing graphs
#
# @author: Zhenhua Cai, [email protected]
# @date: 2011-11-18
#
# @note: I borrowed some code from
# http://matplotlib.sourceforge.net/examples/user_interfaces/embedding_in_qt4.html
#
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from PyQt4 import QtGui, QtCore
import zmq
import json
from common import *
import matplotlib as mpl
mpl.rcParams['lines.linewidth'] = 1.0
mpl.rcParams['font.size'] = 8.0
mpl.rcParams['axes.titlesize'] = 'large'
mpl.rcParams['legend.fancybox'] = True
mpl.rcParams['legend.fontsize'] = 'large'
mpl.rcParams['legend.shadow'] = False
mpl.rcParams['legend.numpoints'] = 1
mpl.rcParams['figure.facecolor'] = 'white'
# for black background
#mpl.rcParams['axes.facecolor'] = 'black'
#mpl.rcParams['axes.edgecolor'] = 'white'
mpl.rcParams['figure.subplot.left'] = 0.05 # the left side of the subplots of the figure
mpl.rcParams['figure.subplot.right'] = 0.95 # the right side of the subplots of the figure
mpl.rcParams['figure.subplot.bottom'] = 0.05 # the bottom of the subplots of the figure
mpl.rcParams['figure.subplot.top'] = 0.90 # the top of the subplots of the figure
class MonitorWidget(FigureCanvas):
""" Qt4 backend of matplot, which provides a canvas for plotting.
The actual plotting is done within the MonitorThread class automatically.
"""
clicked = QtCore.pyqtSignal()
def __init__(self, publish_endpoint, filter_key,
parent=None, width=5, height=4, dpi=100):
self.filter_key = filter_key
self.tick_interval = 0.01 # real time (in sec) = timestamp * tick_interval
# Initialize figure canvas
self.figure = Figure(figsize=(width, height), dpi=dpi)
self.axes = self.figure.add_subplot(111)
self.axes.hold(True) # Axes would be cleared each time plot() is called
FigureCanvas.__init__(self, self.figure)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding
)
FigureCanvas.updateGeometry(self)
# Initialize variables related to graph
self.max_data_len = 50
# timestamps can be erratic, so we can't assume they'll be
# evenly spaced
self.max_time_period = 30000
self.has_initialized = False
# expected format for data to be plotted is:
# { "timestamp": [1,2,...]
# "label1" : [val1,val2,...],
# "label2" : [val1,val2,...],
# }
self.data_dict = {}
self.legend_list = []
# Create and start ZeroMQ subscriber thread
self.zmq_subscriber_thread = ZmqSubscriberThread(self,
publish_endpoint,
filter_key)
self.zmq_subscriber_thread.start()
# Initialize data, legend and table title of recording file
def initialize_data(self, json_dict):
self.initialTimeStamp = json_dict["timestamp"]
f = file(self.filter_key, "w")
f.write("time\t\t")
for k, v in json_dict.iteritems():
self.data_dict[k] = [v]
if k != "timestamp":
self.legend_list.append(k)
f.write(k+"\t\t")
f.write("\n")
f.close()
self.has_initialized = True
# Update data list and also record them in the external file named after filter_key
def update_data(self, json_dict):
self.latestTimeStamp = json_dict["timestamp"]
forgetFirst = False
f = file(self.filter_key, "a")
elapsed_time = (self.latestTimeStamp - self.initialTimeStamp) * self.tick_interval
f.write(str(elapsed_time) + "\t\t")
# forget old data
if self.latestTimeStamp - self.data_dict["timestamp"][0] > self.max_time_period or \
len(self.data_dict["timestamp"]) > self.max_data_len:
forgetFirst = True
for k, v in json_dict.iteritems():
self.data_dict[k].append(v)
if forgetFirst: self.data_dict[k].pop(0)
if k != "timestamp": f.write(str(v)+"\t\t")
f.write("\n")
f.close()
# Draw the graph on the widget
def draw_graph(self):
self.axes.clear()
max_t = max(self.data_dict["timestamp"])
t_minus = [(x - self.latestTimeStamp)*self.tick_interval for x in self.data_dict["timestamp"]]
for k in self.data_dict:
if k == "timestamp": continue
self.axes.plot(t_minus, self.data_dict[k], '-+')
leg = self.axes.legend(self.legend_list,
'upper left',
shadow=True)
self.axes.set_title(self.zmq_subscriber_thread.filter_key)
self.axes.grid(True)
self.axes.set_xlim(-self.max_time_period*self.tick_interval,0)
self.axes.set_ylim(0,1)
self.draw()
@pyqtSlot(dict)
def handle_data_update(self, json_dict):
"""
Process the data in json format
"""
if not self.has_initialized:
self.initialize_data(json_dict)
self.update_data(json_dict)
# Draw the graph only where no other graph is being rendered.
# In principle, the global lock is not necessary,
# however drawing graph is very CPU consuming,
# introduce this limit may make GUI more responsive
if self.isVisible():
# glb.gui_read_write_lock.lockForWrite()
self.draw_graph()
# glb.gui_read_write_lock.unlock()
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
self.clicked.emit()
| agpl-3.0 |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/matplotlib/cm.py | 8 | 11657 | """
This module provides a large set of colormaps, functions for
registering new colormaps and for getting a colormap by name,
and a mixin class for adding color mapping functionality.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import os
import numpy as np
from numpy import ma
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cbook as cbook
from matplotlib._cm import datad
from matplotlib._cm import cubehelix
from matplotlib._cm_listed import cmaps as cmaps_listed
cmap_d = dict()
# reverse all the colormaps.
# reversed colormaps have '_r' appended to the name.
def _reverser(f):
def freversed(x):
return f(1 - x)
return freversed
def revcmap(data):
"""Can only handle specification *data* in dictionary format."""
data_r = {}
for key, val in six.iteritems(data):
if six.callable(val):
valnew = _reverser(val)
# This doesn't work: lambda x: val(1-x)
# The same "val" (the first one) is used
# each time, so the colors are identical
# and the result is shades of gray.
else:
# Flip x and exchange the y values facing x = 0 and x = 1.
valnew = [(1.0 - x, y1, y0) for x, y0, y1 in reversed(val)]
data_r[key] = valnew
return data_r
def _reverse_cmap_spec(spec):
"""Reverses cmap specification *spec*, can handle both dict and tuple
type specs."""
if 'red' in spec:
return revcmap(spec)
else:
revspec = list(reversed(spec))
if len(revspec[0]) == 2: # e.g., (1, (1.0, 0.0, 1.0))
revspec = [(1.0 - a, b) for a, b in revspec]
return revspec
def _generate_cmap(name, lutsize):
"""Generates the requested cmap from it's name *name*. The lut size is
*lutsize*."""
spec = datad[name]
# Generate the colormap object.
if 'red' in spec:
return colors.LinearSegmentedColormap(name, spec, lutsize)
else:
return colors.LinearSegmentedColormap.from_list(name, spec, lutsize)
LUTSIZE = mpl.rcParams['image.lut']
# Generate the reversed specifications ...
for cmapname in list(six.iterkeys(datad)):
spec = datad[cmapname]
spec_reversed = _reverse_cmap_spec(spec)
datad[cmapname + '_r'] = spec_reversed
# Precache the cmaps with ``lutsize = LUTSIZE`` ...
# Use datad.keys() to also add the reversed ones added in the section above:
for cmapname in six.iterkeys(datad):
cmap_d[cmapname] = _generate_cmap(cmapname, LUTSIZE)
cmap_d.update(cmaps_listed)
locals().update(cmap_d)
# Continue with definitions ...
def register_cmap(name=None, cmap=None, data=None, lut=None):
"""
Add a colormap to the set recognized by :func:`get_cmap`.
It can be used in two ways::
register_cmap(name='swirly', cmap=swirly_cmap)
register_cmap(name='choppy', data=choppydata, lut=128)
In the first case, *cmap* must be a :class:`matplotlib.colors.Colormap`
instance. The *name* is optional; if absent, the name will
be the :attr:`~matplotlib.colors.Colormap.name` attribute of the *cmap*.
In the second case, the three arguments are passed to
the :class:`~matplotlib.colors.LinearSegmentedColormap` initializer,
and the resulting colormap is registered.
"""
if name is None:
try:
name = cmap.name
except AttributeError:
raise ValueError("Arguments must include a name or a Colormap")
if not cbook.is_string_like(name):
raise ValueError("Colormap name must be a string")
if isinstance(cmap, colors.Colormap):
cmap_d[name] = cmap
return
# For the remainder, let exceptions propagate.
if lut is None:
lut = mpl.rcParams['image.lut']
cmap = colors.LinearSegmentedColormap(name, data, lut)
cmap_d[name] = cmap
def get_cmap(name=None, lut=None):
"""
Get a colormap instance, defaulting to rc values if *name* is None.
Colormaps added with :func:`register_cmap` take precedence over
built-in colormaps.
If *name* is a :class:`matplotlib.colors.Colormap` instance, it will be
returned.
If *lut* is not None it must be an integer giving the number of
entries desired in the lookup table, and *name* must be a standard
mpl colormap name.
"""
if name is None:
name = mpl.rcParams['image.cmap']
if isinstance(name, colors.Colormap):
return name
if name in cmap_d:
if lut is None:
return cmap_d[name]
else:
return cmap_d[name]._resample(lut)
else:
raise ValueError(
"Colormap %s is not recognized. Possible values are: %s"
% (name, ', '.join(sorted(cmap_d.keys()))))
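# Illustrative usage sketch (not part of the original module):
#
#     from matplotlib import cm
#     viridis16 = cm.get_cmap('viridis', lut=16)      # resampled copy
#     cm.register_cmap(name='viridis16', cmap=viridis16)
#     cm.get_cmap('viridis16') is viridis16           # -> True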
class ScalarMappable(object):
"""
This is a mixin class to support scalar data to RGBA mapping.
The ScalarMappable makes use of data normalization before returning
RGBA colors from the given colormap.
"""
def __init__(self, norm=None, cmap=None):
r"""
Parameters
----------
norm : :class:`matplotlib.colors.Normalize` instance
The normalizing object which scales data, typically into the
interval ``[0, 1]``.
If *None*, *norm* defaults to a *colors.Normalize* object which
initializes its scaling based on the first data processed.
cmap : str or :class:`~matplotlib.colors.Colormap` instance
The colormap used to map normalized data values to RGBA colors.
"""
self.callbacksSM = cbook.CallbackRegistry()
if cmap is None:
cmap = get_cmap()
if norm is None:
norm = colors.Normalize()
self._A = None
#: The Normalization instance of this ScalarMappable.
self.norm = norm
#: The Colormap instance of this ScalarMappable.
self.cmap = get_cmap(cmap)
#: The last colorbar associated with this ScalarMappable. May be None.
self.colorbar = None
self.update_dict = {'array': False}
def to_rgba(self, x, alpha=None, bytes=False):
"""
Return a normalized rgba array corresponding to *x*.
In the normal case, *x* is a 1-D or 2-D sequence of scalars, and
the corresponding ndarray of rgba values will be returned,
based on the norm and colormap set for this ScalarMappable.
There is one special case, for handling images that are already
rgb or rgba, such as might have been read from an image file.
If *x* is an ndarray with 3 dimensions,
and the last dimension is either 3 or 4, then it will be
treated as an rgb or rgba array, and no mapping will be done.
If the last dimension is 3, the *alpha* kwarg (defaulting to 1)
will be used to fill in the transparency. If the last dimension
is 4, the *alpha* kwarg is ignored; it does not
replace the pre-existing alpha. A ValueError will be raised
if the third dimension is other than 3 or 4.
In either case, if *bytes* is *False* (default), the rgba
array will be floats in the 0-1 range; if it is *True*,
the returned rgba array will be uint8 in the 0 to 255 range.
Note: this method assumes the input is well-behaved; it does
not check for anomalies such as *x* being a masked rgba
array, or being an integer type other than uint8, or being
a floating point rgba array with values outside the 0-1 range.
"""
# First check for special case, image input:
try:
if x.ndim == 3:
if x.shape[2] == 3:
if alpha is None:
alpha = 1
if x.dtype == np.uint8:
alpha = np.uint8(alpha * 255)
m, n = x.shape[:2]
xx = np.empty(shape=(m, n, 4), dtype=x.dtype)
xx[:, :, :3] = x
xx[:, :, 3] = alpha
elif x.shape[2] == 4:
xx = x
else:
raise ValueError("third dimension must be 3 or 4")
if bytes and xx.dtype != np.uint8:
xx = (xx * 255).astype(np.uint8)
if not bytes and xx.dtype == np.uint8:
xx = xx.astype(float) / 255
return xx
except AttributeError:
# e.g., x is not an ndarray; so try mapping it
pass
# This is the normal case, mapping a scalar array:
x = ma.asarray(x)
x = self.norm(x)
x = self.cmap(x, alpha=alpha, bytes=bytes)
return x
def set_array(self, A):
'Set the image array from numpy array *A*'
self._A = A
self.update_dict['array'] = True
def get_array(self):
'Return the array'
return self._A
def get_cmap(self):
'return the colormap'
return self.cmap
def get_clim(self):
'return the min, max of the color limits for image scaling'
return self.norm.vmin, self.norm.vmax
def set_clim(self, vmin=None, vmax=None):
"""
set the norm limits for image scaling; if *vmin* is a length2
sequence, interpret it as ``(vmin, vmax)`` which is used to
support setp
ACCEPTS: a length 2 sequence of floats
"""
if (vmin is not None and vmax is None and
cbook.iterable(vmin) and len(vmin) == 2):
vmin, vmax = vmin
if vmin is not None:
self.norm.vmin = vmin
if vmax is not None:
self.norm.vmax = vmax
self.changed()
def set_cmap(self, cmap):
"""
set the colormap for luminance data
ACCEPTS: a colormap or registered colormap name
"""
cmap = get_cmap(cmap)
self.cmap = cmap
self.changed()
def set_norm(self, norm):
'set the normalization instance'
if norm is None:
norm = colors.Normalize()
self.norm = norm
self.changed()
def autoscale(self):
"""
Autoscale the scalar limits on the norm instance using the
current array
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale(self._A)
self.changed()
def autoscale_None(self):
"""
Autoscale the scalar limits on the norm instance using the
current array, changing only limits that are None
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale_None(self._A)
self.changed()
def add_checker(self, checker):
"""
Add an entry to a dictionary of boolean flags
that are set to True when the mappable is changed.
"""
self.update_dict[checker] = False
def check_update(self, checker):
"""
If mappable has changed since the last check,
return True; else return False
"""
if self.update_dict[checker]:
self.update_dict[checker] = False
return True
return False
def changed(self):
"""
Call this whenever the mappable is changed to notify all the
callbackSM listeners to the 'changed' signal
"""
self.callbacksSM.process('changed', self)
for key in self.update_dict:
self.update_dict[key] = True
| mit |
barentsen/dave | misc/sincfilter.py | 1 | 7330 |
import matplotlib.pyplot as mp
import numpy as np
"""
Functions to apply high and low pass filters to 1d data.
Based on "The Scientist and Engineers' Guide to Digital Signal
Processing" by Steven Smith. Ch 16, Eqn 16.4. This book is available
online at www.dspguide.com/ch16/
Remember, these functions apply to evenly spaced data only.
"""
__version__ = "$Id: sincfilter.py 1983 2015-03-08 04:50:13Z fmullall $"
__URL__ = "$URL: svn+ssh://flux/home/fmullall/svn/kepler/k2phot/sincfilter.py $"
def normalisedFrequencyFromPeriod(period, timespan):
return normaliseFrequency(1/float(period), timespan)
def normaliseFrequency(f, timespan):
"""Convert a frequency in the units of the data into normalised frequency.
Useful for computing input arguments in {high|low}Pass()
Inputs:
f (float) Frequency in, e.g., Hz
timespan (float) Range of data in, e.g., seconds. This is the time
interval between the last and first data points in your
set.
"""
return f/float(timespan)
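def _demo_normalised_frequency():
    """Illustrative sketch (not part of the original module); the numbers are
    hypothetical.  Remove variability on timescales longer than a 5 day
    period from an evenly sampled series spanning 80 days."""
    timespan = 80.0                                     # same units as the period
    fc = normalisedFrequencyFromPeriod(5.0, timespan)   # lies in [0, .5)
    y = np.random.randn(1000)
    return highPass(y, fc, 100)                         # filter half width M=100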
def blackman(sysSize):
"""Generate the Blackman apodisation.
Taken from "The Scientist and Engineers' Guide to Digital Signal
Processing" by Steven Smith. Ch 16, Eqn 16.4. This book is available
online at www.dspguide.com/ch16/2.html
This function is used to apodise a sinc filter to improve its
stopband attenuation.
sysSize: (int) Number of points being used in the filter. Smith
calls this M
Returns:
A 1d array, blackman[i]. The sinc filter is apodised by computing
apodisedFilter[i] = sinc[i] * blackman[i]
"""
sysSize = float(2*sysSize+1)
i = np.arange(sysSize)
#Compute function from right to left. No reason other than it's
#easier to implement
arg = 4.*np.pi*i/sysSize
blackman = .08*np.cos(arg)
arg = 2.*np.pi*i/sysSize
blackman -= 0.5*np.cos(arg)
blackman += 0.42
return blackman
def rectangle(sysSize):
return np.ones(2*sysSize+1)
def highPass(y, normalisedCutoffFreq, filterHalfWidth, apodise=blackman):
"""Apply a high pass filter to evenly spaced data
Based on sincFilter in this module. See that function for more
detailed info on who Smith is.
Inputs:
y (1d float array) Array of evenly spaced data points to filter
normalisedCutoffFreq (float) Frequency at which 50% attenuation
occurs. This input must have a value between
[0,.5). Smith names this value f_c
filterHalfWidth (int) Half the number of points included
in the filter. Smith refers to this value as M.
The number of points used is (2M+1)
apodise (func) What apodisation function to use. The
default is usually the best choice. If you
don't want apodisation for some reason, set
this argument to rectangle
Returns:
A 1d array high pass filtered.
"""
lp = lowPass(y, normalisedCutoffFreq, filterHalfWidth, apodise)
return y - lp
def lowPass(y, normalisedCutoffFreq, filterHalfWidth, apodise=blackman):
"""Apply a low pass filter to evenly spaced data
Based on sincFilter in this module. See that function for more
detailed info on who Smith is.
Inputs:
y (1d float array) Array of evenly spaced data points to filter
normalisedCutoffFreq (float) Frequency at which 50% attenuation
occurs. This input must have a value between
[0,.5). Smith names this value f_c
filterHalfWidth (int) Half the number of points included
in the filter. Smith refers to this value as M.
The number of points used is (2M+1)
apodise (func) What apodisation function to use. The
default is usually the best choice. If you
don't want apodisation for some reason, set
this argument to rectangle
Returns:
A 1d array low pass filtered.
"""
assert(filterHalfWidth %2 == 0)
fc = normalisedCutoffFreq
num = int(filterHalfWidth)
filt = apodise(num) * sincFilter(fc, num)
filt /= np.sum(filt)
#mp.subplot(211)
#mp.plot(filt)
#mp.subplot(212)
#ft = np.fft.rfft(filt)/ np.sqrt(len(filt))
#mp.semilogy(np.abs(ft))
out = np.empty(len(y))
for i in range(len(out)):
i1 = max(i-num, 0)
i2 = min(i+num, len(out)) - 1
j1 = max(num-i, 0)
j2 = min(j1 + i2 - i1, 2*num+1)
out[i] = np.sum( y[i1:i2] * filt[j1:j2])
return out
def sincFilter(normalisedCutoffFreq, numPointsInFilter):
"""
Compute a sincFilter with numPointsInFilter points
Taken from "The Scientist and Engineers' Guide to Digital Signal
Processing" by Steven Smith. Ch 16, Eqn 16.4. This book is available
online at www.dspguide.com/ch16/2.html
A sinc filter is a low pass filter with a sharp cutoff. The larger
the value of numPointsInFilter, the sharper the cutoff.
Sinc functions suffer from ringing on either side of the cutoff which
can be dramatically reduced by apodising the filter with, e.g., a
blackman filter.
Inputs:
normalisedCutoffFreq (float) Frequency at which 50% attenuation
occurs. This input must have a value between
[0,.5). Smith names this value f_c
numPointsInFilter (int) Half the number of points included
in the filter. Smith refers to this value as M.
The number of points used is (2M+1)
Returns:
A 1d array of length 2*numPointsInFilter + 1.
Notes:
If your have a desired cutoff frequency in Hz, the normalised value
can be computed as f/T, where f is the cutoff in Hz, and T is the
timespan of the data in seconds.
"""
if numPointsInFilter %2 != 0:
raise ValueError("numPointsInFilter must be even")
#Mnuemonics
fc = normalisedCutoffFreq
num = 2*numPointsInFilter + 1
assert(fc >= 0)
assert(fc < 0.5)
i = np.arange(num)
j = i- .5*num
#return np.sinc( 2*np.pi*fc * j)
numerator = np.sin(2*np.pi*fc * j)
denom = np.pi*j
#Catch the division by zero, and replace with 1
idx = j == 0
numerator[idx] = 1
denom[idx] = 1
return numerator/denom
def example():
n =2e4
fc = .05
sysSize = 100
mp.clf()
y = np.random.randn(n)
y1 = lowPass(y, fc, sysSize, apodise=rectangle)
y2 = lowPass(y, fc, sysSize, apodise=blackman)
ft = np.fft.rfft(y) / np.sqrt(n)
#ft1 = np.fft.rfft(y1) / np.sqrt(n)
#ft2 = np.fft.rfft(y2) / np.sqrt(n)
mp.clf()
#mp.subplot(211)
#mp.plot(y, 'b.')
##mp.plot(y1, 'r.')
#mp.plot(y2, 'g.')
#mp.subplot(212)
#mp.plot(np.abs(ft), 'b-')
##mp.plot(np.abs(ft1), 'r-')
#mp.plot(np.abs(ft2), 'g-')
fc = .01
sysSize = 400
y = sincFilter(fc, sysSize)
mp.plot(y, 'b-')
y = blackman(sysSize) * sincFilter(fc, sysSize)
mp.plot(y, 'g-')
| mit |
lukas/scikit-class | examples/keras-autoencoder/plotutil.py | 2 | 3725 | import matplotlib
matplotlib.use('Agg') # noqa
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from tensorflow import keras
import numpy as np
import wandb
import os  # needed by plot_results below
def fig2data(fig):
"""
@brief Convert a Matplotlib figure to a 3D numpy array with RGBA channels and return it
@param fig a matplotlib figure
@return a numpy 3D array of RGBA values
"""
# draw the renderer
fig.canvas.draw()
# Get the RGBA buffer from the figure
w, h = fig.canvas.get_width_height()
buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)
buf.shape = (h, w, 4)
# canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
buf = np.roll(buf, 3, axis=2)
return buf
def plot_results(models,
data,
batch_size=128,
model_name="vae_mnist"):
"""Plots labels and MNIST digits as function of 2-dim latent vector
# Arguments:
models (tuple): encoder and decoder models
data (tuple): test data and label
batch_size (int): prediction batch size
model_name (string): which model is using this function
"""
os.makedirs(model_name, exist_ok=True)
filename = os.path.join(model_name, "vae_mean.png")
class PlotCallback(keras.callbacks.Callback):
def __init__(self, encoder, decoder, data):
self.encoder = encoder
self.decoder = decoder
self.x_test, self.y_test = data
self.batch_size = 64
def on_epoch_end(self, epoch, logs):
# Generate a figure with matplotlib
figure = matplotlib.pyplot.figure(figsize=(10, 10))
plt = figure.add_subplot(111)
# display a 2D plot of the digit classes in the latent space
z_mean, _, _ = self.encoder.predict(self.x_test,
batch_size=self.batch_size)
plt.scatter(z_mean[:, 0], z_mean[:, 1], c=self.y_test)
# plt.colorbar()
# plt.xlabel("z[0]")
# plt.ylabel("z[1]")
data = fig2data(figure)
matplotlib.pyplot.close(figure)
wandb.log({"scatter": wandb.Image(data)}, commit=False)
# Generate a figure with matplotlib
figure = matplotlib.pyplot.figure(figsize=(10, 10))
plt = figure.add_subplot(111)
# display a 30x30 2D manifold of digits
n = 30
digit_size = 28
fig = np.zeros((digit_size * n, digit_size * n))
# linearly spaced coordinates corresponding to the 2D plot
# of digit classes in the latent space
grid_x = np.linspace(-4, 4, n)
grid_y = np.linspace(-4, 4, n)[::-1]
for i, yi in enumerate(grid_y):
for j, xi in enumerate(grid_x):
z_sample = np.array([[xi, yi]])
x_decoded = self.decoder.predict(z_sample)
digit = x_decoded[0].reshape(digit_size, digit_size)
fig[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
start_range = digit_size // 2
end_range = n * digit_size + start_range + 1
pixel_range = np.arange(start_range, end_range, digit_size)
sample_range_x = np.round(grid_x, 1)
sample_range_y = np.round(grid_y, 1)
#plt.xticks(pixel_range, sample_range_x)
#plt.yticks(pixel_range, sample_range_y)
# plt.xlabel("z[0]")
# plt.ylabel("z[1]")
plt.imshow(fig, cmap='Greys_r')
data = fig2data(figure)
matplotlib.pyplot.close(figure)
wandb.log({"grid": wandb.Image(data)}, commit=False)
| gpl-2.0 |
lin-credible/scikit-learn | sklearn/utils/random.py | 234 | 10510 | # Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
if pop_size is 0:
raise ValueError("a must be non-empty")
if None != p:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
if None != p:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
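# A minimal usage sketch for the `choice` helper above, wrapped in a private
# function so nothing executes at import time. The population size, the
# probability vector and the seed are arbitrary illustration values.
def _choice_usage_sketch():
    # Weighted sampling with replacement: indices with probability 0 never
    # appear in the result.
    with_repl = choice(5, size=10, replace=True,
                       p=[0.1, 0.0, 0.3, 0.6, 0.0], random_state=0)
    # Without replacement the returned indices are distinct; asking for more
    # samples than there are non-zero probabilities raises ValueError.
    without_repl = choice(5, size=3, replace=False,
                          p=[0.1, 0.0, 0.3, 0.6, 0.0], random_state=0)
    return with_repl, without_repl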
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
        if not np.isclose(np.sum(class_prob_j), 1.0):
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
            # Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
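# A small sketch of `random_choice_csc` usage: two output columns, each with
# its own integer label set and class distribution. Wrapped in a helper so it
# does not run at import; the labels, probabilities and seed are arbitrary
# illustration values (probabilities chosen to sum exactly to 1).
def _random_choice_csc_sketch():
    classes = [np.array([0, 1]), np.array([0, 2, 3])]
    class_probability = [np.array([0.5, 0.5]),
                         np.array([0.5, 0.25, 0.25])]
    return random_choice_csc(n_samples=10, classes=classes,
                             class_probability=class_probability,
                             random_state=0)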
| bsd-3-clause |
anurag313/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
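# A short follow-up sketch: report the gamma value with the highest mean
# cross-validation score from the arrays computed above. The use of argmax
# as a model-selection rule here is only an illustration.
best = np.argmax(test_scores_mean)
print("Best gamma: %g (mean cross-validation accuracy: %.3f)"
      % (param_range[best], test_scores_mean[best]))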
| bsd-3-clause |
Eric89GXL/numpy | numpy/lib/npyio.py | 3 | 83698 | from __future__ import division, absolute_import, print_function
import io
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter, index as opindex
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like,
has_nested_fields, flatten_dtype, easy_dtype, _decode_line
)
from numpy.compat import (
asbytes, asstr, asunicode, asbytes_nested, bytes, basestring, unicode,
is_pathlib_path
)
if sys.version_info[0] >= 3:
import pickle
from collections.abc import Mapping
else:
import cPickle as pickle
from future_builtins import map
from collections import Mapping
def loads(*args, **kwargs):
# NumPy 1.15.0, 2017-12-10
warnings.warn(
"np.loads is deprecated, use pickle.loads instead",
DeprecationWarning, stacklevel=2)
return pickle.loads(*args, **kwargs)
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return list(object.__getattribute__(self, '_obj').keys())
def zipfile_factory(file, *args, **kwargs):
"""
Create a ZipFile.
Allows for Zip64, and the `file` argument can accept file, str, or
pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
constructor.
"""
if is_pathlib_path(file):
file = str(file)
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(file, *args, **kwargs)
class NpzFile(Mapping):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ``.npy`` extension;
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
        An object on which attribute look-up can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=True,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
# Implement the Mapping ABC
def __iter__(self):
return iter(self.files)
def __len__(self):
return len(self.files)
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = False
if key in self._files:
member = True
elif key in self.files:
member = True
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
if sys.version_info.major == 3:
# deprecate the python 2 dict apis that we supported by accident in
# python 3. We forgot to implement itervalues() at all in earlier
        # versions of numpy, so no need to deprecate it here.
def iteritems(self):
# Numpy 1.15, 2018-02-20
warnings.warn(
"NpzFile.iteritems is deprecated in python 3, to match the "
"removal of dict.itertems. Use .items() instead.",
DeprecationWarning, stacklevel=2)
return self.items()
def iterkeys(self):
# Numpy 1.15, 2018-02-20
warnings.warn(
"NpzFile.iterkeys is deprecated in python 3, to match the "
"removal of dict.iterkeys. Use .keys() instead.",
DeprecationWarning, stacklevel=2)
return self.keys()
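# A minimal sketch of how an NpzFile is normally obtained and used: `savez`
# and `load` below produce one from an in-memory buffer, member arrays are
# read lazily on access, and the context-manager form closes the underlying
# zip file. Wrapped in a helper so it does not run at import time; the array
# contents are arbitrary.
def _npzfile_usage_sketch():
    buf = io.BytesIO()
    savez(buf, x=np.arange(3), y=np.ones(2))   # writes a .npz archive
    buf.seek(0)
    with load(buf) as npz:                     # `npz` is an NpzFile
        names = npz.files                      # e.g. ['x', 'y']
        x = npz.f.x                            # read lazily on attribute access
    return names, x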
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object, string, or pathlib.Path
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail.
Default: True
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files in Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif is_pathlib_path(file):
fid = file.open("rb")
own_fid = True
else:
fid = file
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of NumPy arrays is loaded
# in. Pickle does not pass on the encoding information to
# NumPy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = b'PK\x03\x04'
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
# If the file size is less than N, we need to make sure not
# to seek past the beginning of the file
fid.seek(-min(N, len(magic)), 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("allow_pickle=False, but file does not contain "
"non-pickled data")
try:
return pickle.load(fid, **pickle_kwargs)
except Exception:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
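# A tiny round-trip sketch for the plain ``.npy`` path of `load`: an array
# saved with `save` into an in-memory buffer is read back without touching
# disk. Wrapped in a helper so nothing runs at import; the array is arbitrary.
def _load_roundtrip_sketch():
    buf = io.BytesIO()
    save(buf, np.arange(5))
    buf.seek(0)
    return load(buf)          # array([0, 1, 2, 3, 4])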
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file, str, or pathlib.Path
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string or Path, a ``.npy``
extension will be appended to the file name if it does not already
have one.
arr : array_like
Array data to be saved.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
elif is_pathlib_path(file):
if not file.name.endswith('.npy'):
file = file.parent / (file.name + '.npy')
fid = file.open("wb")
own_fid = True
else:
fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
``.npz`` extension will be appended to the file name if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
``.npz`` extension will be appended to the file name if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
numpy.save : Save a single array to a binary file in NumPy format.
numpy.savetxt : Save an array to a file as plain text.
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is compressed with
``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
in ``.npy`` format. For a description of the ``.npy`` format, see
:py:mod:`numpy.lib.format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> test_array = np.random.rand(3, 2)
>>> test_vector = np.random.rand(4)
>>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
>>> loaded = np.load('/tmp/123.npz')
>>> print(np.array_equal(test_array, loaded['a']))
True
>>> print(np.array_equal(test_vector, loaded['b']))
True
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
elif is_pathlib_path(file):
if not file.name.endswith('.npz'):
file = file.parent / (file.name + '.npz')
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
if sys.version_info >= (3, 6):
# Since Python 3.6 it is possible to write directly to a ZIP file.
for key, val in namedict.items():
fname = key + '.npy'
val = np.asanyarray(val)
force_zip64 = val.nbytes >= 2**30
with zipf.open(fname, 'w', force_zip64=force_zip64) as fid:
format.write_array(fid, val,
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Stage arrays in a temporary file on disk, before writing to zip.
# Import deferred for startup time improvement
import tempfile
        # Since the target file might be big enough to exceed the capacity of
        # the default temporary directory, create the temp file side-by-side
        # with the target file.
file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
except IOError as exc:
raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
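# A compact sketch of the Python >= 3.6 branch above: a zip member is opened
# as a writable stream and ``format.write_array`` serializes the array
# straight into it, so no temporary file is needed. Wrapped in a helper and
# only valid on Python 3.6+; the member name and array are arbitrary.
def _direct_zip_write_sketch():
    buf = io.BytesIO()
    zipf = zipfile_factory(buf, mode="w")
    with zipf.open('arr_0.npy', 'w') as member:
        format.write_array(member, np.arange(4))
    zipf.close()
    return buf.getvalue()     # raw bytes of a one-member archive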
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
        x = x.lower()  # make the hex-prefix check below case-insensitive
if '0x' in x:
return float.fromhex(x)
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.longdouble):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, complex):
return lambda x: complex(asstr(x).replace('+-', '-'))
elif issubclass(typ, np.bytes_):
return asbytes
elif issubclass(typ, np.unicode_):
return asunicode
else:
return asstr
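# A brief sketch of the converters `_getconv` hands back: small integer
# dtypes go through ``int(float(x))`` so that "3.0" parses, and the float
# converter accepts C99 hex literals, as noted in the `loadtxt` docstring.
# Wrapped in a helper; the sample strings are arbitrary.
def _getconv_sketch():
    int_conv = _getconv(np.dtype(np.int16))     # -> lambda x: int(float(x))
    flt_conv = _getconv(np.dtype(np.float64))   # -> floatconv
    return int_conv('3.0'), flt_conv('0x1.8p1'), flt_conv('2.5')   # 3, 3.0, 2.5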
# amount of lines loadtxt reads in one chunk, can be overridden for testing
_loadtxt_chunksize = 50000
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0, encoding='bytes'):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file, str, or pathlib.Path
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence of str, optional
The characters or list of characters used to indicate the start of a
comment. None implies no comments. For backwards compatibility, byte
strings will be decoded as 'latin1'. The default is '#'.
delimiter : str, optional
The string used to separate values. For backwards compatibility, byte
strings will be decoded as 'latin1'. The default is whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will parse the
column string into the desired value. E.g., if column 0 is a date
string: ``converters = {0: datestr2num}``. Converters can also be
used to provide a default value for missing data (but see also
`genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``.
Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : int or sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
.. versionchanged:: 1.11.0
When a single column has to be read it is possible to use
an integer instead of a tuple. E.g ``usecols = 3`` reads the
fourth column the same way as ``usecols = (3,)`` would.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
encoding : str, optional
Encoding used to decode the inputfile. Does not apply to input streams.
The special value 'bytes' enables backward compatibility workarounds
        that ensure you receive byte arrays as results if possible and pass
'latin1' encoded strings to converters. Override this value to receive
unicode arrays and pass strings as input to converters. If set to None
the system default is used. The default value is 'bytes'.
.. versionadded:: 1.14.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO(u"0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO(u"M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO(u"1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [comments]
comments = [_decode_line(x) for x in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile('|'.join(comments))
if delimiter is not None:
delimiter = _decode_line(delimiter)
user_converters = converters
if encoding == 'bytes':
encoding = None
byte_converters = True
else:
byte_converters = False
if usecols is not None:
# Allow usecols to be a single int or a sequence of ints
try:
usecols_as_list = list(usecols)
except TypeError:
usecols_as_list = [usecols]
for col_idx in usecols_as_list:
try:
opindex(col_idx)
except TypeError as e:
e.args = (
"usecols must be an int or a sequence of ints but "
"it contains at least one element of type %s" %
type(col_idx),
)
raise
# Fall back to existing code
usecols = usecols_as_list
fown = False
try:
if is_pathlib_path(fname):
fname = str(fname)
if _is_string_like(fname):
fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
fencoding = getattr(fh, 'encoding', 'latin1')
fh = iter(fh)
fown = True
else:
fh = iter(fname)
fencoding = getattr(fname, 'encoding', 'latin1')
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
# input may be a python2 io stream
if encoding is not None:
fencoding = encoding
# we must assume local encoding
# TODO emit portability warning?
elif fencoding is None:
import locale
fencoding = locale.getpreferredencoding()
# not to be confused with the flatten_dtype we import...
def flatten_dtype_internal(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype_internal(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if tp.ndim > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter. """
line = _decode_line(line, encoding=encoding)
if comments is not None:
line = regex_comments.split(line, maxsplit=1)[0]
line = line.strip('\r\n')
if line:
return line.split(delimiter)
else:
return []
def read_data(chunk_size):
"""Parse each line, including the first.
        The file being read, `fh`, is defined in the enclosing scope above.
Parameters
----------
chunk_size : int
At most `chunk_size` lines are read at a time, with iteration
until all lines are read.
"""
X = []
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[j] for j in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
if len(X) > chunk_size:
yield X
X = []
if X:
yield X
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname, stacklevel=2)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype_internal(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
if byte_converters:
                # converters may use decode to work around numpy's old behaviour,
# so encode the string again before passing to the user converter
def tobytes_first(x, conv):
if type(x) is bytes:
return conv(x)
return conv(x.encode("latin1"))
import functools
converters[i] = functools.partial(tobytes_first, conv=conv)
else:
converters[i] = conv
converters = [conv if conv is not bytes else
lambda x: x.encode(fencoding) for conv in converters]
# read data in chunks and fill it into an array via resize
# over-allocating and shrinking the array later may be faster but is
# probably not relevant compared to the cost of actually reading and
# converting the data
X = None
for x in read_data(_loadtxt_chunksize):
if X is None:
X = np.array(x, dtype)
else:
nshape = list(X.shape)
pos = nshape[0]
nshape[0] += len(x)
X.resize(nshape, refcheck=False)
X[pos:, ...] = x
finally:
if fown:
fh.close()
# recursive closures have a cyclic reference to themselves, which
# requires gc to collect (gh-10620). To avoid this problem, for
# performance and PyPy friendliness, we break the cycle:
flatten_dtype_internal = None
pack_items = None
if X is None:
X = np.array([], dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
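# A compact usage sketch combining a few `loadtxt` features documented above:
# a structured dtype, `usecols` and a per-column converter. The sample text,
# the percent-stripping converter and the explicit latin1 encoding (so the
# converter receives ``str`` rather than bytes) are illustration choices.
def _loadtxt_usage_sketch():
    from io import StringIO
    text = StringIO(u"# id  score\n1  45%\n2  80%\n")
    conv = {1: lambda s: float(s.strip().rstrip('%')) / 100.}
    return loadtxt(text, dtype=[('id', 'i4'), ('score', 'f8')],
                   converters=conv, usecols=(0, 1), encoding='latin1')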
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# ', encoding=None):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : 1D or 2D array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
* a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
* a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
* a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
encoding : {None, str}, optional
Encoding used to encode the outputfile. Does not apply to output
streams. If the encoding is something other than 'bytes' or 'latin1'
you will not be able to load the file in NumPy versions < 1.14. Default
is 'latin1'.
.. versionadded:: 1.14.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
        - For integer specifiers (e.g. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<https://docs.python.org/library/string.html#format-specification-mini-language>`_,
Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
class WriteWrap(object):
"""Convert to unicode in py2 or to bytes on bytestream inputs.
"""
def __init__(self, fh, encoding):
self.fh = fh
self.encoding = encoding
self.do_write = self.first_write
def close(self):
self.fh.close()
def write(self, v):
self.do_write(v)
def write_bytes(self, v):
if isinstance(v, bytes):
self.fh.write(v)
else:
self.fh.write(v.encode(self.encoding))
def write_normal(self, v):
self.fh.write(asunicode(v))
def first_write(self, v):
try:
self.write_normal(v)
self.write = self.write_normal
except TypeError:
# input is probably a bytestream
self.write_bytes(v)
self.write = self.write_bytes
own_fh = False
if is_pathlib_path(fname):
fname = str(fname)
if _is_string_like(fname):
# datasource doesn't support creating a new file ...
open(fname, 'wt').close()
fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
own_fh = True
# need to convert str to unicode for text io output
if sys.version_info[0] == 2:
fh = WriteWrap(fh, encoding or 'latin1')
elif hasattr(fname, 'write'):
# wrap to handle byte output streams
fh = WriteWrap(fname, encoding or 'latin1')
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 0 or X.ndim > 2:
raise ValueError(
"Expected 1D or 2D array, got %dD array instead" % X.ndim)
elif X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(comments + header + newline)
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
s = format % tuple(row2) + newline
fh.write(s.replace('+-', '-'))
else:
for row in X:
try:
v = format % tuple(row) + newline
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
fh.write(v)
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(comments + footer + newline)
finally:
if own_fh:
fh.close()
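# A short sketch of two `savetxt` behaviours described above: a single format
# specifier expanded for complex data (written as "(real+imagj)" pairs) and a
# header line prefixed with the comment string. Wrapped in a helper; the
# buffer and the values are arbitrary illustration choices.
def _savetxt_usage_sketch():
    from io import StringIO
    buf = StringIO()
    data = np.array([[1 + 2j, 3 - 4j]])
    savetxt(buf, data, fmt='%.1e', header='re/im pairs', comments='# ')
    return buf.getvalue()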
def fromregex(file, regexp, dtype, encoding=None):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
encoding : str, optional
Encoding used to decode the inputfile. Does not apply to input streams.
.. versionadded:: 1.14.0
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = np.lib._datasource.open(file, 'rt', encoding=encoding)
own_fh = True
try:
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
content = file.read()
if isinstance(content, bytes) and isinstance(regexp, np.unicode):
regexp = asbytes(regexp)
elif isinstance(content, np.unicode) and isinstance(regexp, bytes):
regexp = asstr(regexp)
if not hasattr(regexp, 'match'):
regexp = re.compile(regexp)
seq = regexp.findall(content)
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None, encoding='bytes'):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file, str, pathlib.Path, list of str, generator
File, filename, list, or generator to read. If the filename
extension is `.gz` or `.bz2`, the file is first decompressed. Note
that generators must return byte strings in Python 3k. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first line after
        the first `skip_header` lines. This line can optionally be preceded
by a comment delimiter. If `names` is a sequence or a single-string of
comma-separated names, the names will be used to define the field names
in a structured dtype. If `names` is None, the names of the dtype
fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names have an underscore appended:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
encoding : str, optional
Encoding used to decode the inputfile. Does not apply when `fname` is
a file object. The special value 'bytes' enables backward compatibility
workarounds that ensure that you receive byte arrays when possible
        and pass latin1 encoded strings to converters. Override this value to
receive unicode arrays and pass strings as input to converters. If set
to None the system default is used. The default value is 'bytes'.
.. versionadded:: 1.14.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] NumPy User Guide, section `I/O with NumPy
<https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
---------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO(u"1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO(u"11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
if encoding == 'bytes':
encoding = None
byte_converters = True
else:
byte_converters = False
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if is_pathlib_path(fname):
fname = str(fname)
if isinstance(fname, basestring):
fhd = iter(np.lib._datasource.open(fname, 'rt', encoding=encoding))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, list of strings, "
"or generator. Got %s instead." % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip, encoding=encoding)
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = _decode_line(next(fhd), encoding)
if (names is True) and (comments is not None):
if comments in first_line:
first_line = (
''.join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = ''
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if comments is not None:
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([str(_.strip()) for _ in first_values])
first_line = ''
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
if isinstance(user_missing_values, bytes):
user_missing_values = user_missing_values.decode('latin1')
# Define the list of missing_values (one column: one list)
missing_values = [list(['']) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, basestring):
user_value = user_missing_values.split(",")
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
if conv is bytes:
user_conv = asbytes
elif byte_converters:
# converters may use decode to workaround numpy's old behaviour,
# so encode the string again before passing to the user converter
def tobytes_first(x, conv):
if type(x) is bytes:
return conv(x)
return conv(x.encode("latin1"))
import functools
user_conv = functools.partial(tobytes_first, conv=conv)
else:
user_conv = conv
converters[i].update(user_conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, user_conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
# miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning, stacklevel=2)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v == np.unicode_]
if byte_converters and strcolidx:
# convert strings back to bytes for backward compatibility
warnings.warn(
"Reading unicode strings without specifying the encoding "
"argument is deprecated. Set the encoding, use None for the "
"system default.",
np.VisibleDeprecationWarning, stacklevel=2)
def encode_unicode_cols(row_tup):
row = list(row_tup)
for i in strcolidx:
row[i] = row[i].encode('latin1')
return tuple(row)
try:
data = [encode_unicode_cols(r) for r in data]
except UnicodeEncodeError:
pass
else:
for i in strcolidx:
column_types[i] = np.bytes_
# Update string types to be the right length
sized_column_types = column_types[:]
for i, col_type in enumerate(column_types):
if np.issubdtype(col_type, np.character):
n_chars = max(len(row[i]) for row in data)
sized_column_types[i] = (col_type, n_chars)
if names is None:
# If the dtype is uniform (before sizing strings)
base = set([
c_type
for c, c_type in zip(converters, column_types)
if c._checked])
if len(base) == 1:
uniform_type, = base
(ddtype, mdtype) = (uniform_type, bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(sized_column_types)]
if usemask:
mdtype = [(defaultfmt % i, bool)
for (i, dt) in enumerate(sized_column_types)]
else:
ddtype = list(zip(names, sized_column_types))
mdtype = list(zip(names, [bool] * len(sized_column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if np.issubdtype(ttype, np.character):
ttype = (ttype, max(len(row[i]) for row in data))
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, bool) for _ in dtype.names]
else:
mdtype = bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names, converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != '']
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
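# Illustrative sketch of the missing/filling machinery implemented above:
# per-column missing markers are replaced as the file is read.  With
# hypothetical inline data,
#
#     s = StringIO(u"1,N/A\n2,5")
#     np.genfromtxt(s, delimiter=",",
#                   missing_values={1: "N/A"}, filling_values={1: -999})
#
# should yield array([[   1., -999.], [   2.,    5.]]).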
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
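# Usage sketch (hypothetical file name): with the defaults set above, the CSV
# header row supplies the field names of the returned record array,
# lower-cased, so columns can be read as attributes.
#
#     rec = recfromcsv("example.csv")
#     rec.age        # column "Age" in the file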
| bsd-3-clause |
ElDeveloper/qiita | qiita_db/metadata_template/test/test_util.py | 2 | 41564 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from six import StringIO
from inspect import currentframe, getfile
from os.path import dirname, abspath, join
from unittest import TestCase, main
import warnings
import numpy.testing as npt
import pandas as pd
from pandas.util.testing import assert_frame_equal
import qiita_db as qdb
class TestUtil(TestCase):
"""Tests some utility functions on the metadata_template module"""
def setUp(self):
metadata_dict = {
'Sample1': {'int_col': 1, 'float_col': 2.1, 'str_col': 'str1'},
'Sample2': {'int_col': 2, 'float_col': 3.1, 'str_col': '200'},
'Sample3': {'int_col': 3, 'float_col': 3, 'str_col': 'string30'},
}
self.metadata_map = pd.DataFrame.from_dict(metadata_dict,
orient='index', dtype=str)
self.headers = ['float_col', 'str_col', 'int_col']
self.mfp = join(
dirname(abspath(getfile(currentframe()))), 'support_files')
def test_prefix_sample_names_with_id(self):
exp_metadata_dict = {
'1.Sample1': {'int_col': 1, 'float_col': 2.1, 'str_col': 'str1'},
'1.Sample2': {'int_col': 2, 'float_col': 3.1, 'str_col': '200'},
'1.Sample3': {'int_col': 3, 'float_col': 3, 'str_col': 'string30'},
}
exp_df = pd.DataFrame.from_dict(exp_metadata_dict, orient='index',
dtype=str)
with warnings.catch_warnings(record=True) as warn:
qdb.metadata_template.util.prefix_sample_names_with_id(
self.metadata_map, 1)
self.assertEqual(len(warn), 0)
self.metadata_map.sort_index(inplace=True)
exp_df.sort_index(inplace=True)
assert_frame_equal(self.metadata_map, exp_df)
# test that it only prefixes the samples that are needed
metadata_dict = {
'Sample1': {'int_col': 1, 'float_col': 2.1, 'str_col': 'str1'},
'1.Sample2': {'int_col': 2, 'float_col': 3.1, 'str_col': '200'},
'Sample3': {'int_col': 3, 'float_col': 3, 'str_col': 'string30'},
}
metadata_map = pd.DataFrame.from_dict(
metadata_dict, orient='index', dtype=str)
with warnings.catch_warnings(record=True) as warn:
qdb.metadata_template.util.prefix_sample_names_with_id(
metadata_map, 1)
self.assertEqual(len(warn), 1)
self.assertEqual(str(warn[0].message), 'Some of the samples were '
'already prefixed with the study id.')
metadata_map.sort_index(inplace=True)
assert_frame_equal(metadata_map, exp_df)
# making sure that samples with the same sample name than the study are
# actually prepended
metadata_dict = {
'1': {'int_col': 1, 'float_col': 2.1, 'str_col': 'str1'},
'2': {'int_col': 2, 'float_col': 3.1, 'str_col': '200'},
}
metadata_map = pd.DataFrame.from_dict(
metadata_dict, orient='index', dtype=str)
qdb.metadata_template.util.prefix_sample_names_with_id(metadata_map, 1)
self.assertCountEqual(metadata_map.index, ['1.1', '1.2'])
def test_load_template_to_dataframe(self):
obs = qdb.metadata_template.util.load_template_to_dataframe(
StringIO(EXP_SAMPLE_TEMPLATE))
exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_DICT_FORM, dtype=str)
exp.index.name = 'sample_name'
assert_frame_equal(obs, exp, check_like=True)
def test_load_template_to_dataframe_sample_id(self):
obs = npt.assert_warns(
qdb.exceptions.QiitaDBWarning,
qdb.metadata_template.util.load_template_to_dataframe,
StringIO(EXP_SAMPLE_TEMPLATE_WITH_SAMPLE_ID))
exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_DICT_FORM, dtype=str)
exp.index.name = 'sample_name'
assert_frame_equal(obs, exp, check_like=True)
def test_load_template_to_dataframe_xlsx(self):
# test loading a qiimp file
fp = join(self.mfp, 'a_qiimp_wb.xlsx')
obs = qdb.metadata_template.util.load_template_to_dataframe(fp)
exp = pd.DataFrame.from_dict(EXP_QIIMP, dtype=str)
exp.index.name = 'sample_name'
assert_frame_equal(obs, exp, check_like=True)
# test loading an empty qiimp file
fp = join(self.mfp, 'empty_qiimp_wb.xlsx')
with self.assertRaises(ValueError) as error:
qdb.metadata_template.util.load_template_to_dataframe(fp)
self.assertEqual(str(error.exception), "The template is empty")
# test loading non qiimp file
fp = join(self.mfp, 'not_a_qiimp_wb.xlsx')
obs = qdb.metadata_template.util.load_template_to_dataframe(fp)
exp = pd.DataFrame.from_dict(EXP_NOT_QIIMP, dtype=str)
exp.index.name = 'sample_name'
assert_frame_equal(obs, exp, check_like=True)
def test_load_template_to_dataframe_qiime_map(self):
obs = qdb.metadata_template.util.load_template_to_dataframe(
StringIO(QIIME_TUTORIAL_MAP_SUBSET), index='#SampleID')
exp = pd.DataFrame.from_dict(QIIME_TUTORIAL_MAP_DICT_FORM, dtype=str)
exp.index.name = 'SampleID'
obs.sort_index(axis=0, inplace=True)
obs.sort_index(axis=1, inplace=True)
exp.sort_index(axis=0, inplace=True)
exp.sort_index(axis=1, inplace=True)
assert_frame_equal(obs, exp, check_like=True)
def test_load_template_to_dataframe_duplicate_cols(self):
LTTD = qdb.metadata_template.util.load_template_to_dataframe
with self.assertRaises(qdb.exceptions.QiitaDBDuplicateHeaderError):
LTTD(StringIO(EXP_SAMPLE_TEMPLATE_DUPE_COLS))
# testing duplicated empty headers
test = (
"sample_name\tdescription\t \t \t\t \t\n"
"sample1\tsample1\t \t \t\t\n"
"sample2\tsample2\t\t\t\t \t")
with self.assertRaises(ValueError):
LTTD(StringIO(test))
# testing empty columns
test = (
"sample_name\tdescription\tcol1\ttcol2\n"
"sample1\tsample1\t \t \n"
"sample2\tsample2\t \t")
df = LTTD(StringIO(test))
self.assertEqual(df.columns.values, ['description'])
def test_load_template_to_dataframe_scrubbing(self):
obs = qdb.metadata_template.util.load_template_to_dataframe(
StringIO(EXP_SAMPLE_TEMPLATE_SPACES))
exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_DICT_FORM, dtype=str)
exp.index.name = 'sample_name'
assert_frame_equal(obs, exp, check_like=True)
def test_load_template_to_dataframe_empty_columns(self):
obs = npt.assert_warns(
qdb.exceptions.QiitaDBWarning,
qdb.metadata_template.util.load_template_to_dataframe,
StringIO(EXP_ST_SPACES_EMPTY_COLUMN))
exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_DICT_FORM, dtype=str)
exp.index.name = 'sample_name'
assert_frame_equal(obs, exp, check_like=True)
def test_load_template_to_dataframe_empty_rows(self):
obs = qdb.metadata_template.util.load_template_to_dataframe(
StringIO(EXP_SAMPLE_TEMPLATE_SPACES_EMPTY_ROW))
exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_DICT_FORM, dtype=str)
exp.index.name = 'sample_name'
assert_frame_equal(obs, exp, check_like=True)
def test_load_template_to_dataframe_no_sample_name_cast(self):
obs = qdb.metadata_template.util.load_template_to_dataframe(
StringIO(EXP_SAMPLE_TEMPLATE_NUMBER_SAMPLE_NAMES))
exp = pd.DataFrame.from_dict(
SAMPLE_TEMPLATE_NUMBER_SAMPLE_NAMES_DICT_FORM, dtype=str)
exp.index.name = 'sample_name'
obs.sort_index(inplace=True)
exp.sort_index(inplace=True)
assert_frame_equal(obs, exp, check_like=True)
def test_load_template_to_dataframe_empty_sample_names(self):
obs = qdb.metadata_template.util.load_template_to_dataframe(
StringIO(SAMPLE_TEMPLATE_NO_SAMPLE_NAMES))
exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_DICT_FORM, dtype=str)
exp.index.name = 'sample_name'
assert_frame_equal(obs, exp, check_like=True)
obs = qdb.metadata_template.util.load_template_to_dataframe(
StringIO(SAMPLE_TEMPLATE_NO_SAMPLE_NAMES_SOME_SPACES))
exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_DICT_FORM, dtype=str)
exp.index.name = 'sample_name'
assert_frame_equal(obs, exp, check_like=True)
def test_load_template_to_dataframe_empty_column(self):
obs = npt.assert_warns(
qdb.exceptions.QiitaDBWarning,
qdb.metadata_template.util.load_template_to_dataframe,
StringIO(SAMPLE_TEMPLATE_EMPTY_COLUMN))
exp = pd.DataFrame.from_dict(ST_EMPTY_COLUMN_DICT_FORM, dtype=str)
exp.index.name = 'sample_name'
assert_frame_equal(obs, exp, check_like=True)
def test_load_template_to_dataframe_column_with_nas(self):
obs = qdb.metadata_template.util.load_template_to_dataframe(
StringIO(SAMPLE_TEMPLATE_COLUMN_WITH_NAS))
exp = pd.DataFrame.from_dict(ST_COLUMN_WITH_NAS_DICT_FORM, dtype=str)
exp.index.name = 'sample_name'
assert_frame_equal(obs, exp, check_like=True)
def test_load_template_to_dataframe_exception(self):
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
qdb.metadata_template.util.load_template_to_dataframe(
StringIO(SAMPLE_TEMPLATE_NO_SAMPLE_NAME))
def test_load_template_to_dataframe_whitespace(self):
obs = qdb.metadata_template.util.load_template_to_dataframe(
StringIO(EXP_SAMPLE_TEMPLATE_WHITESPACE))
exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_DICT_FORM, dtype=str)
exp.index.name = 'sample_name'
assert_frame_equal(obs, exp, check_like=True)
def test_load_template_to_dataframe_lowercase(self):
obs = qdb.metadata_template.util.load_template_to_dataframe(
StringIO(EXP_SAMPLE_TEMPLATE_MULTICASE))
exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_DICT_FORM, dtype=str)
exp.index.name = 'sample_name'
exp.rename(columns={"str_column": "str_CoLumn"}, inplace=True)
assert_frame_equal(obs, exp, check_like=True)
def test_load_template_to_dataframe_non_utf8(self):
replace = EXP_SAMPLE_TEMPLATE.replace(
'Test Sample 2', u'Test Sample\x962')
qdb.metadata_template.util.load_template_to_dataframe(
StringIO(replace))
# setting back
replace = EXP_SAMPLE_TEMPLATE.replace(
u'Test Sample\x962', 'Test Sample 2')
qdb.metadata_template.util.load_template_to_dataframe(
StringIO(replace))
def test_load_template_to_dataframe_typechecking(self):
obs = qdb.metadata_template.util.load_template_to_dataframe(
StringIO(EXP_SAMPLE_TEMPLATE_LAT_ALL_INT))
exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_LAT_ALL_INT_DICT,
dtype=str)
exp.index.name = 'sample_name'
assert_frame_equal(obs, exp, check_like=True)
obs = qdb.metadata_template.util.load_template_to_dataframe(
StringIO(EXP_SAMPLE_TEMPLATE_LAT_MIXED_FLOAT_INT))
exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_MIXED_FLOAT_INT_DICT,
dtype=str)
exp.index.name = 'sample_name'
assert_frame_equal(obs, exp, check_like=True)
def test_load_template_to_dataframe_with_nulls(self):
obs = qdb.metadata_template.util.load_template_to_dataframe(
StringIO(EXP_SAMPLE_TEMPLATE_NULLS))
exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_NULLS_DICT, dtype=str)
exp.index.name = 'sample_name'
assert_frame_equal(obs, exp, check_like=True)
def test_get_invalid_sample_names(self):
all_valid = ['2.sample.1', 'foo.bar.baz', 'roses', 'are', 'red',
'v10l3t5', '4r3', '81u3']
obs = qdb.metadata_template.util.get_invalid_sample_names(all_valid)
self.assertEqual(obs, [])
all_valid = ['sample.1', 'sample.2', 'SAMPLE.1', 'BOOOM']
obs = qdb.metadata_template.util.get_invalid_sample_names(all_valid)
self.assertEqual(obs, [])
def test_get_invalid_sample_names_str(self):
one_invalid = ['2.sample.1', 'foo.bar.baz', 'roses', 'are', 'red',
'I am the chosen one', 'v10l3t5', '4r3', '81u3']
obs = qdb.metadata_template.util.get_invalid_sample_names(one_invalid)
self.assertCountEqual(obs, ['I am the chosen one'])
one_invalid = ['2.sample.1', 'foo.bar.baz', 'roses', 'are', 'red',
':L{=<', ':L}=<', '4r3', '81u3']
obs = qdb.metadata_template.util.get_invalid_sample_names(one_invalid)
self.assertCountEqual(obs, [':L{=<', ':L}=<'])
def test_get_get_invalid_sample_names_mixed(self):
one_invalid = ['.', '1', '2']
obs = qdb.metadata_template.util.get_invalid_sample_names(one_invalid)
self.assertCountEqual(obs, [])
one_invalid = [' ', ' ', ' ']
obs = qdb.metadata_template.util.get_invalid_sample_names(one_invalid)
self.assertCountEqual(obs, [' ', ' ', ' '])
def test_looks_like_qiime_mapping_file(self):
obs = qdb.metadata_template.util.looks_like_qiime_mapping_file(
StringIO(EXP_SAMPLE_TEMPLATE))
self.assertFalse(obs)
obs = qdb.metadata_template.util.looks_like_qiime_mapping_file(
StringIO(QIIME_TUTORIAL_MAP_SUBSET))
self.assertTrue(obs)
mf = join(self.mfp, 'qiita_map_unicode.tsv')
obs = qdb.metadata_template.util.looks_like_qiime_mapping_file(mf)
self.assertTrue(obs)
obs = qdb.metadata_template.util.looks_like_qiime_mapping_file(
StringIO())
self.assertFalse(obs)
def test_parse_mapping_file(self):
# Tests ported over from QIIME
s1 = ['#sample\ta\tb', '#comment line to skip',
'x \t y \t z ', ' ', '#more skip', 'i\tj\tk']
exp = ([['x', 'y', 'z'], ['i', 'j', 'k']],
['sample', 'a', 'b'],
['comment line to skip', 'more skip'])
obs = qdb.metadata_template.util._parse_mapping_file(s1)
self.assertEqual(obs, exp)
# check that we strip double quotes by default
s2 = ['#sample\ta\tb', '#comment line to skip',
'"x "\t" y "\t z ', ' ', '"#more skip"', 'i\t"j"\tk']
obs = qdb.metadata_template.util._parse_mapping_file(s2)
self.assertEqual(obs, exp)
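        # In both cases _parse_mapping_file returns (data_rows, header,
        # comments): the header row loses its leading '#', comment lines are
        # collected separately, and blank lines are dropped.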
def test_get_pgsql_reserved_words(self):
# simply testing that at least one of the well know reserved words is
# in the list
obs = qdb.metadata_template.util.get_pgsql_reserved_words()
self.assertIn('select', obs)
def test_get_qiime2_reserved_words(self):
# simply testing that at least one of the well know reserved words is
# in the list
obs = qdb.metadata_template.util.get_qiime2_reserved_words()
self.assertIn('featureid', obs)
QIIME_TUTORIAL_MAP_SUBSET = (
"#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tTreatment\tDOB\t"
"Description\n"
"PC.354\tAGCACGAGCCTA\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\t"
"Control_mouse_I.D._354\n"
"PC.607\tAACTGTGCGTAC\tYATGCTGCCTCCCGTAGGAGT\tFast\t20071112\t"
"Fasting_mouse_I.D._607\n"
)
EXP_SAMPLE_TEMPLATE = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tint_column\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\tstr_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\tNotIdentified"
"\t1\t42.42\t41.41\tlocation1\treceived\ttype1\tValue for sample 1\n"
"2.Sample2\t2014-05-29 12:24:51\tTest Sample 2\tTrue\tTrue\tNotIdentified"
"\t2\t4.2\t1.1\tlocation1\treceived\ttype1\tValue for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\tTrue\tNotIdentified"
"\t3\t4.8\t4.41\tlocation1\treceived\ttype1\tValue for sample 3\n")
EXP_SAMPLE_TEMPLATE_MULTICASE = (
"sAmPle_Name\tcollection_timestamp\tDescription\thas_extracted_data\t"
"has_physical_specimen\thost_Subject_id\tint_column\tlatitude\tLongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\tstr_CoLumn\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\tNotIdentified"
"\t1\t42.42\t41.41\tlocation1\treceived\ttype1\tValue for sample 1\n"
"2.Sample2\t2014-05-29 12:24:51\tTest Sample 2\tTrue\tTrue\tNotIdentified"
"\t2\t4.2\t1.1\tlocation1\treceived\ttype1\tValue for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\tTrue\tNotIdentified"
"\t3\t4.8\t4.41\tlocation1\treceived\ttype1\tValue for sample 3\n")
EXP_SAMPLE_TEMPLATE_LAT_ALL_INT = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tint_column\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\tstr_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\tNotIdentified"
"\t1\t42\t41.41\tlocation1\treceived\ttype1\tValue for sample 1\n"
"2.Sample2\t2014-05-29 12:24:51\tTest Sample 2\tTrue\tTrue\tNotIdentified"
"\t2\t4\t1.1\tlocation1\treceived\ttype1\tValue for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\tTrue\tNotIdentified"
"\t3\t4\t4.41\tlocation1\treceived\ttype1\tValue for sample 3\n")
EXP_SAMPLE_TEMPLATE_LAT_MIXED_FLOAT_INT = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tint_column\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\tstr_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\tNotIdentified"
"\t1\t42\t41.41\tlocation1\treceived\ttype1\tValue for sample 1\n"
"2.Sample2\t2014-05-29 12:24:51\tTest Sample 2\tTrue\tTrue\tNotIdentified"
"\t2\t4\t1.1\tlocation1\treceived\ttype1\tValue for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\tTrue\tNotIdentified"
"\t3\t4.8\t4.41\tlocation1\treceived\ttype1\tValue for sample 3\n")
EXP_SAMPLE_TEMPLATE_DUPE_COLS = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\tstr_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"Value for sample 1\tValue for sample 1\n"
"2.Sample2\t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t4.2\t1.1\tlocation1\treceived\t"
"type1\tValue for sample 2\tValue for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"Value for sample 3\tValue for sample 3\n")
EXP_SAMPLE_TEMPLATE_SPACES = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tint_column\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\n"
"2.Sample1 \t2014-05-29 12:24:51\tTest Sample 1\t"
'"True\t"\t"\nTrue"\t'
"NotIdentified\t1\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"Value for sample 1\n"
"2.Sample2 \t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t2\t4.2\t1.1\tlocation1\t"
"received\ttype1\tValue for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t3\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"Value for sample 3\n")
EXP_SAMPLE_TEMPLATE_WHITESPACE = (
"sample_name \tcollection_timestamp\t description \thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tint_column\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t1\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"Value for sample 1\n"
"2.Sample2\t 2014-05-29 12:24:51 \t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t2\t4.2\t1.1\tlocation1\t"
"received\ttype1\t Value for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\t Test Sample 3 \tTrue\t"
"True\tNotIdentified\t3\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"Value for sample 3\n")
EXP_SAMPLE_TEMPLATE_SPACES_EMPTY_ROW = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tint_column\tlatitude\t"
"longitude\t physical_location\trequired_sample_info_status"
"\tsample_type\tstr_column\n"
" 2.Sample1 \t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t1\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"Value for sample 1\n"
" 2.Sample2 \t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t2\t4.2\t1.1\tlocation1\t"
"received\ttype1\tValue for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t3\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"Value for sample 3\n"
"\t\t\t\t\t\t\t\t\t\t\t\t\n"
"\t\t\t\t\t\t\t\t\t\t \t\t\n")
EXP_SAMPLE_TEMPLATE_WITH_SAMPLE_ID = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tint_column\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\tstr_column\t"
"sample_id\tsample-id\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\tNotIdentified"
"\t1\t42.42\t41.41\tlocation1\treceived\ttype1\tValue for sample 1\tA\ta\n"
"2.Sample2\t2014-05-29 12:24:51\tTest Sample 2\tTrue\tTrue\tNotIdentified"
"\t2\t4.2\t1.1\tlocation1\treceived\ttype1\tValue for sample 2\tB\tb\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\tTrue\tNotIdentified"
"\t3\t4.8\t4.41\tlocation1\treceived\ttype1\tValue for sample 3\tC\tc\n")
EXP_ST_SPACES_EMPTY_COLUMN = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tint_column\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\t\n"
"2.Sample1 \t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t1\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"Value for sample 1\t\n"
"2.Sample2 \t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t2\t4.2\t1.1\tlocation1\t"
"received\ttype1\tValue for sample 2\t\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t3\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"Value for sample 3\t\n")
EXP_SAMPLE_TEMPLATE_NUMBER_SAMPLE_NAMES = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\n"
"002.000\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"Value for sample 1\n"
"1.11111\t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t4.2\t1.1\tlocation1\treceived\t"
"type1\tValue for sample 2\n"
"0.12121\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"Value for sample 3\n")
SAMPLE_TEMPLATE_NO_SAMPLE_NAMES = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tint_column\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t1\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"Value for sample 1\n"
"2.Sample2\t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t2\t4.2\t1.1\tlocation1\t"
"received\ttype1\tValue for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t3\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"Value for sample 3\n"
"\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"Value for sample 3\n"
"\t\t\t\t\t\t\t\t\t\t\t\n"
)
SAMPLE_TEMPLATE_NO_SAMPLE_NAMES_SOME_SPACES = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tint_column\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t1\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"Value for sample 1\n"
"2.Sample2\t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t2\t4.2\t1.1\tlocation1\t"
"received\ttype1\tValue for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t3\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"Value for sample 3\n"
"\t\t\t\t\t \t\t\t\t\t \t\t\n"
)
SAMPLE_TEMPLATE_EMPTY_COLUMN = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"\n"
"2.Sample2\t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t4.2\t1.1\tlocation1\treceived\t"
"type1\t\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"\n")
SAMPLE_TEMPLATE_COLUMN_WITH_NAS = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"NA\n"
"2.Sample2\t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t4.2\t1.1\tlocation1\treceived\t"
"type1\tNA\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"NA\n")
SAMPLE_TEMPLATE_NO_SAMPLE_NAME = (
":L}={\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"NA\n"
"2.Sample2\t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t4.2\t1.1\tlocation1\treceived\t"
"type1\tNA\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"NA\n")
SAMPLE_TEMPLATE_INVALID_LONGITUDE_COLUMNS = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"1\t11.42\t41.41\tlocation1\treceived\ttype1\t"
"Value for sample 1\n"
"2.Sample2\t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\1\t4.2\tXXX\tlocation1\treceived\t"
"type1\tValue for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\1\t4.8\t4.XXXXX41\tlocation1\treceived\ttype1\t"
"Value for sample 3\n")
EXP_SAMPLE_TEMPLATE_NULLS = (
"sample_name\tmy_bool_col\tmy_bool_col_w_nulls\n"
"sample.1\tTrue\tFalse\n"
"sample.2\tFalse\tUnknown\n"
"sample.3\tTrue\tTrue\n"
"sample.4\tFalse\t\n"
"sample.5\tTrue\tTrue\n"
"sample.6\tFalse\tTrue\n")
SAMPLE_TEMPLATE_NULLS_DICT = {
'my_bool_col': {"sample.1": 'True',
"sample.2": 'False',
"sample.3": 'True',
"sample.4": 'False',
"sample.5": 'True',
"sample.6": 'False'},
'my_bool_col_w_nulls': {"sample.1": 'False',
"sample.2": 'Unknown',
"sample.3": 'True',
"sample.4": '',
"sample.5": 'True',
"sample.6": 'True'}
}
SAMPLE_TEMPLATE_DICT_FORM = {
'collection_timestamp': {'2.Sample1': '2014-05-29 12:24:51',
'2.Sample2': '2014-05-29 12:24:51',
'2.Sample3': '2014-05-29 12:24:51'},
'description': {'2.Sample1': 'Test Sample 1',
'2.Sample2': 'Test Sample 2',
'2.Sample3': 'Test Sample 3'},
'has_extracted_data': {'2.Sample1': 'True',
'2.Sample2': 'True',
'2.Sample3': 'True'},
'has_physical_specimen': {'2.Sample1': 'True',
'2.Sample2': 'True',
'2.Sample3': 'True'},
'host_subject_id': {'2.Sample1': 'NotIdentified',
'2.Sample2': 'NotIdentified',
'2.Sample3': 'NotIdentified'},
'latitude': {'2.Sample1': '42.42',
'2.Sample2': '4.2',
'2.Sample3': '4.8'},
'longitude': {'2.Sample1': '41.41',
'2.Sample2': '1.1',
'2.Sample3': '4.41'},
'physical_location': {'2.Sample1': 'location1',
'2.Sample2': 'location1',
'2.Sample3': 'location1'},
'required_sample_info_status': {'2.Sample1': 'received',
'2.Sample2': 'received',
'2.Sample3': 'received'},
'sample_type': {'2.Sample1': 'type1',
'2.Sample2': 'type1',
'2.Sample3': 'type1'},
'str_column': {'2.Sample1': 'Value for sample 1',
'2.Sample2': 'Value for sample 2',
'2.Sample3': 'Value for sample 3'},
'int_column': {'2.Sample1': '1',
'2.Sample2': '2',
'2.Sample3': '3'}
}
SAMPLE_TEMPLATE_LAT_ALL_INT_DICT = {
'collection_timestamp': {'2.Sample1': '2014-05-29 12:24:51',
'2.Sample2': '2014-05-29 12:24:51',
'2.Sample3': '2014-05-29 12:24:51'},
'description': {'2.Sample1': 'Test Sample 1',
'2.Sample2': 'Test Sample 2',
'2.Sample3': 'Test Sample 3'},
'has_extracted_data': {'2.Sample1': 'True',
'2.Sample2': 'True',
'2.Sample3': 'True'},
'has_physical_specimen': {'2.Sample1': 'True',
'2.Sample2': 'True',
'2.Sample3': 'True'},
'host_subject_id': {'2.Sample1': 'NotIdentified',
'2.Sample2': 'NotIdentified',
'2.Sample3': 'NotIdentified'},
'latitude': {'2.Sample1': '42',
'2.Sample2': '4',
'2.Sample3': '4'},
'longitude': {'2.Sample1': '41.41',
'2.Sample2': '1.1',
'2.Sample3': '4.41'},
'physical_location': {'2.Sample1': 'location1',
'2.Sample2': 'location1',
'2.Sample3': 'location1'},
'required_sample_info_status': {'2.Sample1': 'received',
'2.Sample2': 'received',
'2.Sample3': 'received'},
'sample_type': {'2.Sample1': 'type1',
'2.Sample2': 'type1',
'2.Sample3': 'type1'},
'str_column': {'2.Sample1': 'Value for sample 1',
'2.Sample2': 'Value for sample 2',
'2.Sample3': 'Value for sample 3'},
'int_column': {'2.Sample1': '1',
'2.Sample2': '2',
'2.Sample3': '3'}
}
SAMPLE_TEMPLATE_MIXED_FLOAT_INT_DICT = {
'collection_timestamp': {'2.Sample1': '2014-05-29 12:24:51',
'2.Sample2': '2014-05-29 12:24:51',
'2.Sample3': '2014-05-29 12:24:51'},
'description': {'2.Sample1': 'Test Sample 1',
'2.Sample2': 'Test Sample 2',
'2.Sample3': 'Test Sample 3'},
'has_extracted_data': {'2.Sample1': 'True',
'2.Sample2': 'True',
'2.Sample3': 'True'},
'has_physical_specimen': {'2.Sample1': 'True',
'2.Sample2': 'True',
'2.Sample3': 'True'},
'host_subject_id': {'2.Sample1': 'NotIdentified',
'2.Sample2': 'NotIdentified',
'2.Sample3': 'NotIdentified'},
'latitude': {'2.Sample1': '42',
'2.Sample2': '4',
'2.Sample3': '4.8'},
'longitude': {'2.Sample1': '41.41',
'2.Sample2': '1.1',
'2.Sample3': '4.41'},
'physical_location': {'2.Sample1': 'location1',
'2.Sample2': 'location1',
'2.Sample3': 'location1'},
'required_sample_info_status': {'2.Sample1': 'received',
'2.Sample2': 'received',
'2.Sample3': 'received'},
'sample_type': {'2.Sample1': 'type1',
'2.Sample2': 'type1',
'2.Sample3': 'type1'},
'str_column': {'2.Sample1': 'Value for sample 1',
'2.Sample2': 'Value for sample 2',
'2.Sample3': 'Value for sample 3'},
'int_column': {'2.Sample1': '1',
'2.Sample2': '2',
'2.Sample3': '3'}
}
SAMPLE_TEMPLATE_NUMBER_SAMPLE_NAMES_DICT_FORM = {
'collection_timestamp': {'002.000': '2014-05-29 12:24:51',
'1.11111': '2014-05-29 12:24:51',
'0.12121': '2014-05-29 12:24:51'},
'description': {'002.000': 'Test Sample 1',
'1.11111': 'Test Sample 2',
'0.12121': 'Test Sample 3'},
'has_extracted_data': {'002.000': 'True',
'1.11111': 'True',
'0.12121': 'True'},
'has_physical_specimen': {'002.000': 'True',
'1.11111': 'True',
'0.12121': 'True'},
'host_subject_id': {'002.000': 'NotIdentified',
'1.11111': 'NotIdentified',
'0.12121': 'NotIdentified'},
'latitude': {'002.000': '42.42',
'1.11111': '4.2',
'0.12121': '4.8'},
'longitude': {'002.000': '41.41',
'1.11111': '1.1',
'0.12121': '4.41'},
'physical_location': {'002.000': 'location1',
'1.11111': 'location1',
'0.12121': 'location1'},
'required_sample_info_status': {'002.000': 'received',
'1.11111': 'received',
'0.12121': 'received'},
'sample_type': {'002.000': 'type1',
'1.11111': 'type1',
'0.12121': 'type1'},
'str_column': {'002.000': 'Value for sample 1',
'1.11111': 'Value for sample 2',
'0.12121': 'Value for sample 3'}}
ST_EMPTY_COLUMN_DICT_FORM = \
{'collection_timestamp': {'2.Sample1': '2014-05-29 12:24:51',
'2.Sample2': '2014-05-29 12:24:51',
'2.Sample3': '2014-05-29 12:24:51'},
'description': {'2.Sample1': 'Test Sample 1',
'2.Sample2': 'Test Sample 2',
'2.Sample3': 'Test Sample 3'},
'has_extracted_data': {'2.Sample1': 'True',
'2.Sample2': 'True',
'2.Sample3': 'True'},
'has_physical_specimen': {'2.Sample1': 'True',
'2.Sample2': 'True',
'2.Sample3': 'True'},
'host_subject_id': {'2.Sample1': 'NotIdentified',
'2.Sample2': 'NotIdentified',
'2.Sample3': 'NotIdentified'},
'latitude': {'2.Sample1': '42.42',
'2.Sample2': '4.2',
'2.Sample3': '4.8'},
'longitude': {'2.Sample1': '41.41',
'2.Sample2': '1.1',
'2.Sample3': '4.41'},
'physical_location': {'2.Sample1': 'location1',
'2.Sample2': 'location1',
'2.Sample3': 'location1'},
'required_sample_info_status': {'2.Sample1': 'received',
'2.Sample2': 'received',
'2.Sample3': 'received'},
'sample_type': {'2.Sample1': 'type1',
'2.Sample2': 'type1',
'2.Sample3': 'type1'}}
ST_COLUMN_WITH_NAS_DICT_FORM = \
{'collection_timestamp': {'2.Sample1': '2014-05-29 12:24:51',
'2.Sample2': '2014-05-29 12:24:51',
'2.Sample3': '2014-05-29 12:24:51'},
'description': {'2.Sample1': 'Test Sample 1',
'2.Sample2': 'Test Sample 2',
'2.Sample3': 'Test Sample 3'},
'has_extracted_data': {'2.Sample1': 'True',
'2.Sample2': 'True',
'2.Sample3': 'True'},
'has_physical_specimen': {'2.Sample1': 'True',
'2.Sample2': 'True',
'2.Sample3': 'True'},
'host_subject_id': {'2.Sample1': 'NotIdentified',
'2.Sample2': 'NotIdentified',
'2.Sample3': 'NotIdentified'},
'latitude': {'2.Sample1': '42.42',
'2.Sample2': '4.2',
'2.Sample3': '4.8'},
'longitude': {'2.Sample1': '41.41',
'2.Sample2': '1.1',
'2.Sample3': '4.41'},
'physical_location': {'2.Sample1': 'location1',
'2.Sample2': 'location1',
'2.Sample3': 'location1'},
'required_sample_info_status': {'2.Sample1': 'received',
'2.Sample2': 'received',
'2.Sample3': 'received'},
'sample_type': {'2.Sample1': 'type1',
'2.Sample2': 'type1',
'2.Sample3': 'type1'},
'str_column': {'2.Sample1': 'NA', '2.Sample2': 'NA', '2.Sample3': 'NA'}}
QIIME_TUTORIAL_MAP_DICT_FORM = {
'BarcodeSequence': {'PC.354': 'AGCACGAGCCTA',
'PC.607': 'AACTGTGCGTAC'},
'LinkerPrimerSequence': {'PC.354': 'YATGCTGCCTCCCGTAGGAGT',
'PC.607': 'YATGCTGCCTCCCGTAGGAGT'},
'Treatment': {'PC.354': 'Control',
'PC.607': 'Fast'},
'DOB': {'PC.354': '20061218',
'PC.607': '20071112'},
'Description': {'PC.354': 'Control_mouse_I.D._354',
'PC.607': 'Fasting_mouse_I.D._607'}
}
EXP_PREP_TEMPLATE = (
'sample_name\tbarcodesequence\tcenter_name\tcenter_project_name\t'
'ebi_submission_accession\temp_status\texperiment_design_description\t'
'library_construction_protocol\tlinkerprimersequence\tplatform\t'
'run_prefix\tstr_column\n'
'1.SKB7.640196\tCCTCTGAGAGCT\tANL\tTest Project\tNone\tEMP\tBBBB\tAAAA\t'
'GTGCCAGCMGCCGCGGTAA\tIllumina\ts_G1_L002_sequences\tValue for sample 3\n'
'1.SKB8.640193\tGTCCGCAAGTTA\tANL\tTest Project\tNone\tEMP\tBBBB\tAAAA\t'
'GTGCCAGCMGCCGCGGTAA\tIllumina\ts_G1_L001_sequences\tValue for sample 1\n'
'1.SKD8.640184\tCGTAGAGCTCTC\tANL\tTest Project\tNone\tEMP\tBBBB\tAAAA\t'
'GTGCCAGCMGCCGCGGTAA\tIllumina\ts_G1_L001_sequences\tValue for sample 2\n')
EXP_QIIMP = {
'asfaewf': {'sample': 'f', 'oijnmk': 'f'},
'pheno': {'sample': 'med', 'oijnmk': 'missing: not provided'},
'bawer': {'sample': 'a', 'oijnmk': 'b'},
'aelrjg': {'sample': 'asfe', 'oijnmk': 'asfs'}
}
EXP_NOT_QIIMP = {
'myownidea': {
'sample5': 'I skipped some',
'sample1': 'sampleoneinfo',
'sample2': 'sampletwoinfo'}
}
if __name__ == '__main__':
main()
| bsd-3-clause |
sibis-platform/ncanda-data-integration | scripts/qc/make_redcap_inventory.py | 2 | 8195 | #!/usr/bin/env python
"""
Given an API, load all the forms, count up their non-NA values, and mark their missing/complete status.
"""
import argparse
import pandas as pd
import pdb
import redcap as rc
import sys
from load_utils import load_form_with_primary_key
from qa_utils import chunked_form_export, get_items_matching_regex
import sibispy
from sibispy import sibislogger as slog
from typing import List
def parse_args(input_args: List = None) -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", help="Verbose operation",
action="store_true")
parser.add_argument("-p", "--post-to-github",
help="Post all issues to GitHub instead of stdout.",
action="store_true")
parser.add_argument("-a", "--all-forms",
action='store_true')
parser.add_argument("-f", "--forms",
nargs='+')
parser.add_argument("-e", "--events",
nargs='*')
parser.add_argument('-o', '--output',
help="File to save the inventory to",
default=sys.stdout)
parser.add_argument('-s', '--api',
help="Source Redcap API to use",
default="data_entry")
parser.add_argument('-d', '--include-dag',
help="Include Redcap Data Access Group for each row",
dest="include_dag",
action="store_true")
args = parser.parse_args(input_args)
return args
def make_redcap_inventory(api: rc.Project,
forms: List,
events: List = None,
post_to_github: bool = False,
include_dag: bool = False,
verbose: bool = False) -> pd.DataFrame:
# Determine scope
meta = api.export_metadata(format='df')
all_forms = meta['form_name'].unique().tolist()
if forms is not None:
all_forms = [form for form in all_forms if form in forms]
# always load visit_ignore___yes - if whole visit is ignored, then this
# form should be, too (very NCANDA-specific)
data = {form: chunked_form_export(api, forms=[form], events=events,
include_dag=include_dag,
fields=['visit_ignore'])
for form in all_forms}
final_dfs = []
for form in all_forms:
try:
form_stats = data[form].apply(get_flag_and_meta, axis=1)
except ValueError:
continue
form_stats['form_name'] = form
form_stats['status'] = make_classification(form_stats)
final_dfs.append(form_stats)
return pd.concat(final_dfs, sort=False)
# Apply to DF to get all empty records
def get_flag_and_meta(row: pd.Series, verbose: bool = True) -> pd.Series:
try:
columns = row.columns.tolist()
except Exception as e:
# No columns in a Series
columns = row.index.tolist()
cols_dag = get_items_matching_regex('redcap_data_access_group', columns)
cols_complete = get_items_matching_regex(
"_complete$|^np_reyo_qc___completed$", columns)
# np_gpeg_exclusion isn't exactly right - it's like a partial missingness
# reason?
cols_ignore = get_items_matching_regex(
"^visit_ignore___yes$|_exclude$|^np_gpeg_exclusion", columns)
cols_missing = get_items_matching_regex(
# "^np_reyo_qc___completed$|^bio_mr_same_as_np_day___yes$|_missing$",
"^bio_mr_same_as_np___yes$|_missing$", columns)
cols_missing_explanation = get_items_matching_regex(
"_missing_why(_other)?$", columns)
cols_checklists = get_items_matching_regex('___', columns)
# Only keep "normal" checklists that aren't a part of any other things
cols_checklists_pure = (set(cols_checklists)
- set(cols_ignore)
- set(cols_complete)
- set(cols_missing))
# after a set operation, preserve order:
cols_checklists_pure = [c for c in cols_checklists
if c in cols_checklists_pure]
all_meta_cols = (cols_complete + cols_ignore + cols_missing + cols_dag
+ cols_missing_explanation + cols_checklists)
result = {}
if len(cols_dag) > 0:
result.update({'dag': row['redcap_data_access_group']})
non_nan_count = row.drop(all_meta_cols).notnull().sum()
result.update({'non_nan_count': non_nan_count})
# Count checklists properly
if cols_checklists_pure:
col_val1_count = (row[cols_checklists_pure].isin([1, '1'])).sum()
result.update({'non_nan_count': non_nan_count + col_val1_count})
# There *can* be multiple sort-of exclusion/missingness columns (for one,
# we're including visit_ignore___yes on all forms, and some have their own
# `exclude` switches) - so we'll just assume that wherever there's at least
# one 1 flipped for exclusion, the form is excluded. Same with missingness.
#
# Taking the max of multiple columns is just a quick way to do that.
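    # (e.g. visit_ignore___yes == 0 but <form>_exclude == 1 still gives a max
    # of 1, so the whole form counts as excluded)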
if cols_ignore:
result.update({'exclude': row[cols_ignore].max(skipna=True)})
if cols_missing:
result.update({'missing': row[cols_missing].max(skipna=True)})
if len(cols_complete) > 0:
if 'np_reyo_qc___completed' in cols_complete:
# special case: for Reyo QC, the checklist is a completion status,
# so we should consider it, but the form should also be marked
# Complete
result.update({'complete': row[cols_complete].max(skipna=True)})
else:
# take the last completion status, on the assumption that it's the
# overall completion (currently not implementing LSSAGA subparts)
result.update({'complete': row[cols_complete[-1]]})
return pd.Series(result)
def make_classification(form: pd.DataFrame) -> pd.Series:
"""
Return an indexed series of content classifications (present, missing,
excluded, empty)
"""
output = pd.Series(index=form.index)
try:
idx_missing = form['missing'] == 1
output.loc[idx_missing] = 'MISSING'
except KeyError:
pass
idx_exclude = form['exclude'] == 1
idx_present = ((form['non_nan_count'] > 0)
& (form['exclude'] != 1))
idx_empty = form['non_nan_count'] == 0
if 'missing' in form:
# NOTE: This is failing for cases where Rey-O wasn't
# done, so Figure Scores are all hidden and couldn't
# be done either
idx_empty = idx_empty & (form['missing'] != 1)
output.loc[idx_exclude] = 'EXCLUDED'
# output.loc[idx_present & ~idx_missing] = 'PRESENT'
# overrides MISSING in cases that have content
output.loc[idx_present] = 'PRESENT'
output.loc[idx_empty] = 'EMPTY'
return output
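# Sketch of the rules above on a toy frame (hypothetical data):
#
#     toy = pd.DataFrame({'non_nan_count': [3, 0, 2],
#                         'exclude': [0, 0, 1],
#                         'missing': [0, 0, 0]})
#     make_classification(toy)  # -> PRESENT, EMPTY, EXCLUDED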
if __name__ == '__main__':
args = parse_args()
session = sibispy.Session()
if not session.configure():
sys.exit()
slog.init_log(verbose=args.verbose,
post_to_github=args.post_to_github,
github_issue_title='QC: Save content stats by form',
github_issue_label='inventory',
timerDir=None)
# Setting specific constants for this run of QC
api = session.connect_server(args.api, timeFlag=True)
try:
if args.all_forms:
forms = None
else:
forms = args.forms
inventory = make_redcap_inventory(api=api,
forms=forms,
events=args.events,
post_to_github=args.post_to_github,
include_dag=args.include_dag,
verbose=args.verbose)
inventory.to_csv(args.output, float_format="%.0f")
sys.exit(0)
except Exception as e:
print(e)
if args.verbose:
print(sys.exc_info()[0])
sys.exit(1)
| bsd-3-clause |
claesenm/optunity-benchmark | optimizers/tpe/hyperopt_august2013_mod_src/hyperopt/tests/test_tpe.py | 3 | 27620 | import unittest
import os
import nose
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
pass
from hyperopt import pyll
from hyperopt.pyll import scope
import hyperopt.bandits
from hyperopt import Bandit
from hyperopt import Experiment
from hyperopt import Random
from hyperopt import Trials
from hyperopt.base import miscs_to_idxs_vals
from hyperopt.bandits import quadratic1
from hyperopt.bandits import q1_lognormal
from hyperopt.bandits import n_arms
from hyperopt.bandits import distractor
from hyperopt.bandits import gauss_wave
from hyperopt.bandits import gauss_wave2
from hyperopt.pyll_utils import hp_choice
from hyperopt.pyll_utils import hp_randint
from hyperopt.pyll_utils import hp_uniform, hp_loguniform
from hyperopt.pyll_utils import hp_quniform, hp_qloguniform
from hyperopt.pyll_utils import hp_normal, hp_lognormal
from hyperopt.pyll_utils import hp_qnormal, hp_qlognormal
from hyperopt.tpe import adaptive_parzen_normal_orig
#from hyperopt.tpe import adaptive_parzen_normal
from hyperopt.tpe import TreeParzenEstimator
from hyperopt.tpe import GMM1
from hyperopt.tpe import GMM1_lpdf
from hyperopt.tpe import LGMM1
from hyperopt.tpe import LGMM1_lpdf
DO_SHOW = int(os.getenv('HYPEROPT_SHOW', '0'))
@hyperopt.as_bandit(loss_target=0)
def many_dists():
a=hp_choice('a', [0, 1, 2])
b=hp_randint('b', 10)
c=hp_uniform('c', 4, 7)
d=hp_loguniform('d', -2, 0)
e=hp_quniform('e', 0, 10, 3)
f=hp_qloguniform('f', 0, 3, 2)
g=hp_normal('g', 4, 7)
h=hp_lognormal('h', -2, 2)
i=hp_qnormal('i', 0, 10, 2)
j=hp_qlognormal('j', 0, 2, 1)
z = a + b + c + d + e + f + g + h + i + j
return {'loss': scope.float(scope.log(1e-12 + z ** 2))}
def test_adaptive_parzen_normal_orig():
rng = np.random.RandomState(123)
prior_mu = 7
prior_sigma = 2
mus = rng.randn(10) + 5
weights2, mus2, sigmas2 = adaptive_parzen_normal_orig(
mus, 3.3, prior_mu, prior_sigma)
print weights2
print mus2
print sigmas2
assert len(weights2) == len(mus2) == len(sigmas2) == 11
assert np.all(weights2[0] > weights2[1:])
assert mus2[0] == 7
assert np.all(mus2[1:] == mus)
assert sigmas2[0] == 2
class TestGMM1(unittest.TestCase):
def setUp(self):
self.rng = np.random.RandomState(234)
def test_mu_is_used_correctly(self):
assert np.allclose(10,
GMM1([1], [10.0], [0.0000001], rng=self.rng))
def test_sigma_is_used_correctly(self):
samples = GMM1([1], [0.0], [10.0], size=[1000], rng=self.rng)
assert 9 < np.std(samples) < 11
def test_mus_make_variance(self):
samples = GMM1([.5, .5], [0.0, 1.0], [0.000001, 0.000001],
rng=self.rng, size=[1000])
print samples.shape
#import matplotlib.pyplot as plt
#plt.hist(samples)
#plt.show()
assert .45 < np.mean(samples) < .55, np.mean(samples)
assert .2 < np.var(samples) < .3, np.var(samples)
def test_weights(self):
samples = GMM1([.9999, .0001], [0.0, 1.0], [0.000001, 0.000001],
rng=self.rng,
size=[1000])
assert samples.shape == (1000,)
#import matplotlib.pyplot as plt
#plt.hist(samples)
#plt.show()
assert -.001 < np.mean(samples) < .001, np.mean(samples)
assert np.var(samples) < .0001, np.var(samples)
def test_mat_output(self):
samples = GMM1([.9999, .0001], [0.0, 1.0], [0.000001, 0.000001],
rng=self.rng,
size=[40, 20])
assert samples.shape == (40, 20)
assert -.001 < np.mean(samples) < .001, np.mean(samples)
assert np.var(samples) < .0001, np.var(samples)
def test_lpdf_scalar_one_component(self):
llval = GMM1_lpdf(1.0, # x
[1.], # weights
[1.0], # mu
[2.0], # sigma
)
assert llval.shape == ()
assert np.allclose(llval,
np.log(1.0 / np.sqrt(2 * np.pi * 2.0 ** 2)))
def test_lpdf_scalar_N_components(self):
llval = GMM1_lpdf(1.0, # x
[0.25, 0.25, .5], # weights
[0.0, 1.0, 2.0], # mu
[1.0, 2.0, 5.0], # sigma
)
a = (.25 / np.sqrt(2 * np.pi * 1.0 ** 2)
* np.exp(-.5 * (1.0) ** 2))
a += (.25 / np.sqrt(2 * np.pi * 2.0 ** 2))
a += (.5 / np.sqrt(2 * np.pi * 5.0 ** 2)
* np.exp(-.5 * (1.0 / 5.0) ** 2))
def test_lpdf_vector_N_components(self):
llval = GMM1_lpdf([1.0, 0.0], # x
[0.25, 0.25, .5], # weights
[0.0, 1.0, 2.0], # mu
[1.0, 2.0, 5.0], # sigma
)
# case x = 1.0
a = (.25 / np.sqrt(2 * np.pi * 1.0 ** 2)
* np.exp(-.5 * (1.0) ** 2))
a += (.25 / np.sqrt(2 * np.pi * 2.0 ** 2))
a += (.5 / np.sqrt(2 * np.pi * 5.0 ** 2)
* np.exp(-.5 * (1.0 / 5.0) ** 2))
assert llval.shape == (2,)
assert np.allclose(llval[0], np.log(a))
# case x = 0.0
a = (.25 / np.sqrt(2 * np.pi * 1.0 ** 2))
a += (.25 / np.sqrt(2 * np.pi * 2.0 ** 2)
* np.exp(-.5 * (1.0 / 2.0) ** 2))
a += (.5 / np.sqrt(2 * np.pi * 5.0 ** 2)
* np.exp(-.5 * (2.0 / 5.0) ** 2))
assert np.allclose(llval[1], np.log(a))
def test_lpdf_matrix_N_components(self):
llval = GMM1_lpdf(
[
[1.0, 0.0, 0.0],
[0, 0, 1],
[0, 0, 1000],
],
[0.25, 0.25, .5], # weights
[0.0, 1.0, 2.0], # mu
[1.0, 2.0, 5.0], # sigma
)
print llval
assert llval.shape == (3, 3)
a = (.25 / np.sqrt(2 * np.pi * 1.0 ** 2)
* np.exp(-.5 * (1.0) ** 2))
a += (.25 / np.sqrt(2 * np.pi * 2.0 ** 2))
a += (.5 / np.sqrt(2 * np.pi * 5.0 ** 2)
* np.exp(-.5 * (1.0 / 5.0) ** 2))
assert np.allclose(llval[0, 0], np.log(a))
assert np.allclose(llval[1, 2], np.log(a))
# case x = 0.0
a = (.25 / np.sqrt(2 * np.pi * 1.0 ** 2))
a += (.25 / np.sqrt(2 * np.pi * 2.0 ** 2)
* np.exp(-.5 * (1.0 / 2.0) ** 2))
a += (.5 / np.sqrt(2 * np.pi * 5.0 ** 2)
* np.exp(-.5 * (2.0 / 5.0) ** 2))
assert np.allclose(llval[0, 1], np.log(a))
assert np.allclose(llval[0, 2], np.log(a))
assert np.allclose(llval[1, 0], np.log(a))
assert np.allclose(llval[1, 1], np.log(a))
assert np.allclose(llval[2, 0], np.log(a))
assert np.allclose(llval[2, 1], np.log(a))
assert np.isfinite(llval[2, 2])
class TestGMM1Math(unittest.TestCase):
def setUp(self):
self.rng = np.random.RandomState(234)
self.weights = [.1, .3, .4, .2]
self.mus = [1.0, 2.0, 3.0, 4.0]
self.sigmas = [.1, .4, .8, 2.0]
self.q = None
self.low = None
self.high = None
self.n_samples = 10001
self.samples_per_bin = 500
self.show = False
# -- triggers error if test case forgets to call work()
self.worked = False
def tearDown(self):
assert self.worked
def work(self):
self.worked = True
kwargs = dict(
weights=self.weights,
mus=self.mus,
sigmas=self.sigmas,
low=self.low,
high=self.high,
q=self.q,
)
samples = GMM1(rng=self.rng,
size=(self.n_samples,),
**kwargs)
samples = np.sort(samples)
edges = samples[::self.samples_per_bin]
#print samples
pdf = np.exp(GMM1_lpdf(edges[:-1], **kwargs))
dx = edges[1:] - edges[:-1]
y = 1 / dx / len(dx)
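        # each inter-edge bin holds samples_per_bin sorted samples, so its
        # empirical density is approximately (1 / number of bins) / bin width;
        # this is compared against the analytic pdf at the left bin edges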
if self.show:
plt.scatter(edges[:-1], y)
plt.plot(edges[:-1], pdf)
plt.show()
err = (pdf - y) ** 2
print np.max(err)
print np.mean(err)
print np.median(err)
if not self.show:
assert np.max(err) < .1
assert np.mean(err) < .01
assert np.median(err) < .01
def test_basic(self):
self.work()
def test_bounded(self):
self.low = 2.5
self.high = 3.5
self.work()
class TestQGMM1Math(unittest.TestCase):
def setUp(self):
self.rng = np.random.RandomState(234)
self.weights = [.1, .3, .4, .2]
self.mus = [1.0, 2.0, 3.0, 4.0]
self.sigmas = [.1, .4, .8, 2.0]
self.low = None
self.high = None
self.n_samples = 1001
self.show = DO_SHOW # or put a string
# -- triggers error if test case forgets to call work()
self.worked = False
def tearDown(self):
assert self.worked
def work(self, **kwargs):
self.__dict__.update(kwargs)
del kwargs
self.worked = True
gkwargs = dict(
weights=self.weights,
mus=self.mus,
sigmas=self.sigmas,
low=self.low,
high=self.high,
q=self.q,
)
samples = GMM1(rng=self.rng,
size=(self.n_samples,),
**gkwargs) / self.q
print 'drew', len(samples), 'samples'
assert np.all(samples == samples.astype('int'))
min_max = int(samples.min()), int(samples.max())
counts = np.bincount(samples.astype('int') - min_max[0])
print counts
xcoords = np.arange(min_max[0], min_max[1] + 1) * self.q
prob = np.exp(GMM1_lpdf(xcoords, **gkwargs))
assert counts.sum() == self.n_samples
y = counts / float(self.n_samples)
if self.show:
plt.scatter(xcoords, y, c='r', label='empirical')
plt.scatter(xcoords, prob, c='b', label='predicted')
plt.legend()
plt.title(str(self.show))
plt.show()
err = (prob - y) ** 2
print np.max(err)
print np.mean(err)
print np.median(err)
if self.show:
raise nose.SkipTest()
else:
assert np.max(err) < .1
assert np.mean(err) < .01
assert np.median(err) < .01
def test_basic_1(self):
self.work(q=1)
def test_basic_2(self):
self.work(q=2)
def test_basic_pt5(self):
self.work(q=0.5)
def test_bounded_1(self):
self.work(q=1, low=2, high=4)
def test_bounded_2(self):
self.work(q=2, low=2, high=4)
def test_bounded_1b(self):
self.work(q=1, low=1, high=4.1)
def test_bounded_2b(self):
self.work(q=2, low=1, high=4.1)
def test_bounded_3(self):
self.work(
weights=[0.14285714, 0.28571429, 0.28571429, 0.28571429],
mus=[5.505, 7., 2., 10.],
sigmas=[8.99, 5., 8., 8.],
q=1,
low=1.01,
high=10,
n_samples=10000,
#show='bounded_3',
)
def test_bounded_3b(self):
self.work(
weights=[0.33333333, 0.66666667],
mus=[5.505, 5.],
sigmas=[8.99, 5.19],
q=1,
low=1.01,
high=10,
n_samples=10000,
#show='bounded_3b',
)
class TestLGMM1Math(unittest.TestCase):
def setUp(self):
self.rng = np.random.RandomState(234)
self.weights = [.1, .3, .4, .2]
self.mus = [-2.0, 1.0, 0.0, 3.0]
self.sigmas = [.1, .4, .8, 2.0]
self.low = None
self.high = None
self.n_samples = 10001
self.samples_per_bin = 200
self.show = False
# -- triggers error if test case forgets to call work()
self.worked = False
def tearDown(self):
assert self.worked
@property
def LGMM1_kwargs(self):
return dict(
weights=self.weights,
mus=self.mus,
sigmas=self.sigmas,
low=self.low,
high=self.high,
)
def LGMM1_lpdf(self, samples):
        # delegate to the module-level LGMM1_lpdf with this test's parameters
        return LGMM1_lpdf(samples, **self.LGMM1_kwargs)
def work(self, **kwargs):
self.__dict__.update(kwargs)
self.worked = True
samples = LGMM1(rng=self.rng,
size=(self.n_samples,),
**self.LGMM1_kwargs)
samples = np.sort(samples)
edges = samples[::self.samples_per_bin]
centers = .5 * edges[:-1] + .5 * edges[1:]
print edges
pdf = np.exp(LGMM1_lpdf(centers, **self.LGMM1_kwargs))
dx = edges[1:] - edges[:-1]
y = 1 / dx / len(dx)
if self.show:
plt.scatter(centers, y)
plt.plot(centers, pdf)
plt.show()
err = (pdf - y) ** 2
print np.max(err)
print np.mean(err)
print np.median(err)
if not self.show:
assert np.max(err) < .1
assert np.mean(err) < .01
assert np.median(err) < .01
def test_basic(self):
self.work()
def test_bounded(self):
self.work(low=2, high=4)
class TestQLGMM1Math(unittest.TestCase):
def setUp(self):
self.rng = np.random.RandomState(234)
self.weights = [.1, .3, .4, .2]
self.mus = [-2, 0.0, -3.0, 1.0]
self.sigmas = [2.1, .4, .8, 2.1]
self.low = None
self.high = None
self.n_samples = 1001
self.show = DO_SHOW
# -- triggers error if test case forgets to call work()
self.worked = False
def tearDown(self):
assert self.worked
@property
def kwargs(self):
return dict(
weights=self.weights,
mus=self.mus,
sigmas=self.sigmas,
low=self.low,
high=self.high,
q=self.q)
def QLGMM1_lpdf(self, samples):
        # delegate to the module-level LGMM1_lpdf with this test's parameters
        return LGMM1_lpdf(samples, **self.kwargs)
def work(self, **kwargs):
self.__dict__.update(kwargs)
self.worked = True
samples = LGMM1(rng=self.rng,
size=(self.n_samples,),
**self.kwargs) / self.q
# -- we've divided the LGMM1 by self.q to get ints here
assert np.all(samples == samples.astype('int'))
min_max = int(samples.min()), int(samples.max())
print 'SAMPLES RANGE', min_max
counts = np.bincount(samples.astype('int') - min_max[0])
#print samples
#print counts
xcoords = np.arange(min_max[0], min_max[1] + 0.5) * self.q
prob = np.exp(LGMM1_lpdf(xcoords, **self.kwargs))
print xcoords
print prob
assert counts.sum() == self.n_samples
y = counts / float(self.n_samples)
if self.show:
plt.scatter(xcoords, y, c='r', label='empirical')
plt.scatter(xcoords, prob, c='b', label='predicted')
plt.legend()
plt.show()
# -- calculate errors on the low end, don't take a mean
# over all the range spanned by a few outliers.
err = ((prob - y) ** 2)[:20]
print np.max(err)
print np.mean(err)
print np.median(err)
if self.show:
raise nose.SkipTest()
else:
assert np.max(err) < .1
assert np.mean(err) < .01
assert np.median(err) < .01
def test_basic_1(self):
self.work(q=1)
def test_basic_2(self):
self.work(q=2)
def test_basic_pt5(self):
self.work(q=0.5)
def test_basic_pt125(self):
self.work(q=0.125)
def test_bounded_1(self):
self.work(q=1, low=2, high=4)
def test_bounded_2(self):
self.work(q=2, low=2, high=4)
def test_bounded_1b(self):
self.work(q=1, low=1, high=4.1)
def test_bounded_2b(self):
self.work(q=2, low=1, high=4.1)
class CasePerBandit(object):
def test_quadratic1(self):
self.bandit = quadratic1()
self.work()
def test_q1lognormal(self):
self.bandit = q1_lognormal()
self.work()
def test_twoarms(self):
self.bandit = n_arms()
self.work()
def test_distractor(self):
self.bandit = distractor()
self.work()
def test_gausswave(self):
self.bandit = gauss_wave()
self.work()
def test_gausswave2(self):
self.bandit = gauss_wave2()
self.work()
def test_many_dists(self):
self.bandit = many_dists()
self.work()
if 0:
class TestPosteriorClone(unittest.TestCase, CasePerBandit):
def work(self):
"""Test that all prior samplers are gone"""
tpe_algo = TreeParzenEstimator(self.bandit)
foo = pyll.as_apply([
tpe_algo.post_below['idxs'],
tpe_algo.post_below['vals']])
prior_names = [
'uniform',
'quniform',
'loguniform',
'qloguniform',
'normal',
'qnormal',
'lognormal',
'qlognormal',
'randint',
]
for node in pyll.dfs(foo):
assert node.name not in prior_names
if 0:
class TestPosteriorCloneSample(unittest.TestCase, CasePerBandit):
def work(self):
bandit = self.bandit
random_algo = Random(bandit)
# build an experiment of 10 trials
trials = Trials()
exp = Experiment(trials, random_algo)
#print random_algo.s_specs_idxs_vals
exp.run(10)
ids = trials.tids
assert len(ids) == 10
tpe_algo = TreeParzenEstimator(bandit)
#print pyll.as_apply(tpe_algo.post_idxs)
#print pyll.as_apply(tpe_algo.post_vals)
argmemo = {}
print trials.miscs
idxs, vals = miscs_to_idxs_vals(trials.miscs)
argmemo[tpe_algo.observed['idxs']] = idxs
argmemo[tpe_algo.observed['vals']] = vals
argmemo[tpe_algo.observed_loss['idxs']] = trials.tids
argmemo[tpe_algo.observed_loss['vals']] = trials.losses()
stuff = pyll.rec_eval([tpe_algo.post_below['idxs'],
tpe_algo.post_below['vals']],
memo=argmemo)
print stuff
class TestSuggest(unittest.TestCase, CasePerBandit):
def work(self):
trials = Trials()
bandit = self.bandit
tpe_algo = TreeParzenEstimator(bandit)
tpe_algo.n_EI_candidates = 3
exp = Experiment(trials, tpe_algo)
exp.run(10)
class TestOpt(unittest.TestCase, CasePerBandit):
thresholds = dict(
quadratic1=1e-5,
q1_lognormal=0.01,
distractor=-1.96,
gauss_wave=-2.0,
gauss_wave2=-2.0,
n_arms=-2.5,
many_dists=.0005,
)
LEN = dict(
# -- running a long way out tests overflow/underflow
# to some extent
quadratic1=1000,
many_dists=200,
distractor=100,
#XXX
q1_lognormal=250,
)
gammas = dict(
distractor=.05,
)
prior_weights = dict(
distractor=.01,
)
n_EIs = dict(
#XXX
# -- this can be low in a few dimensions
quadratic1=5,
# -- lower number encourages exploration
# XXX: this is a damned finicky way to get TPE
# to solve the Distractor problem
distractor=15,
)
def setUp(self):
self.olderr = np.seterr('raise')
np.seterr(under='ignore')
def tearDown(self, *args):
np.seterr(**self.olderr)
def work(self):
bandit = self.bandit
assert bandit.name is not None
print 'Bandit', bandit.name
algo = TreeParzenEstimator(bandit,
gamma=self.gammas.get(bandit.name,
TreeParzenEstimator.gamma),
prior_weight=self.prior_weights.get(bandit.name,
TreeParzenEstimator.prior_weight),
n_EI_candidates=self.n_EIs.get(bandit.name,
TreeParzenEstimator.n_EI_candidates),
)
LEN = self.LEN.get(bandit.name, 50)
trials = Trials()
exp = Experiment(trials, algo)
exp.catch_bandit_exceptions = False
exp.run(LEN)
assert len(trials) == LEN
if 1:
rtrials = Trials()
exp = Experiment(rtrials, Random(bandit))
exp.run(LEN)
print 'RANDOM MINS', list(sorted(rtrials.losses()))[:6]
#logx = np.log([s['x'] for s in rtrials.specs])
#print 'RND MEAN', np.mean(logx)
#print 'RND STD ', np.std(logx)
print algo.n_EI_candidates
print algo.gamma
print algo.prior_weight
if 0:
plt.subplot(2, 2, 1)
plt.scatter(range(LEN), trials.losses())
plt.title('TPE losses')
plt.subplot(2, 2, 2)
plt.scatter(range(LEN), ([s['x'] for s in trials.specs]))
plt.title('TPE x')
plt.subplot(2, 2, 3)
plt.title('RND losses')
plt.scatter(range(LEN), rtrials.losses())
plt.subplot(2, 2, 4)
plt.title('RND x')
plt.scatter(range(LEN), ([s['x'] for s in rtrials.specs]))
plt.show()
if 0:
plt.hist(
[t['x'] for t in self.experiment.trials],
bins=20)
#print trials.losses()
print 'TPE MINS', list(sorted(trials.losses()))[:6]
#logx = np.log([s['x'] for s in trials.specs])
#print 'TPE MEAN', np.mean(logx)
#print 'TPE STD ', np.std(logx)
thresh = self.thresholds[bandit.name]
print 'Thresh', thresh
assert min(trials.losses()) < thresh
def notest_opt_qn_uniform():
notest_opt_qn_normal(hp_uniform)
def notest_opt_qn_normal(f=hp_normal):
bandit = Bandit(
{'loss': scope.sum([f('v%i' % ii, 0, 1)
for ii in range(25)]) ** 2},
loss_target=0)
algo = TreeParzenEstimator(bandit,
prior_weight=.5,
n_startup_jobs=0,
n_EI_candidates=1,
gamma=0.15)
trials = Trials()
experiment = Experiment(trials, algo, async=False)
experiment.max_queue_len = 1
experiment.run(40)
print 'sorted losses:', list(sorted(trials.losses()))
idxs, vals = miscs_to_idxs_vals(trials.miscs)
if 1:
import hyperopt.plotting
hyperopt.plotting.main_plot_vars(trials, bandit, do_show=1)
else:
import matplotlib.pyplot as plt
begin = [v[:10] for k, v in vals.items()]
end = [v[-10:] for k, v in vals.items()]
plt.subplot(2, 1, 1)
plt.title('before')
plt.hist(np.asarray(begin).flatten())
plt.subplot(2, 1, 2)
plt.title('after')
plt.hist(np.asarray(end).flatten())
plt.show()
@hyperopt.as_bandit(loss_target=0, rseed=123)
def opt_q_uniform(target):
x = hp_quniform('x', 1.01, 10, 1)
return {'loss': (x - target) ** 2 + scope.normal(0, 1)}
class TestOptQUniform():
show_steps = False
show_vars = DO_SHOW
LEN = 25
def work(self, **kwargs):
self.__dict__.update(kwargs)
bandit = opt_q_uniform(self.target)
prior_weight = 2.5
gamma = 0.20
algo = TreeParzenEstimator(bandit,
prior_weight=prior_weight,
n_startup_jobs=2,
n_EI_candidates=128,
gamma=gamma)
print algo.opt_idxs['x']
print algo.opt_vals['x']
trials = Trials()
experiment = Experiment(trials, algo)
experiment.run(self.LEN)
if self.show_vars:
import hyperopt.plotting
hyperopt.plotting.main_plot_vars(trials, bandit, do_show=1)
idxs, vals = miscs_to_idxs_vals(trials.miscs)
idxs = idxs['x']
vals = vals['x']
print "VALS", vals
losses = trials.losses()
from hyperopt.tpe import ap_filter_trials
from hyperopt.tpe import adaptive_parzen_samplers
qu = scope.quniform(1.01, 10, 1)
fn = adaptive_parzen_samplers['quniform']
fn_kwargs = dict(size=(4,), rng=np.random)
s_below = pyll.Literal()
s_above = pyll.Literal()
b_args = [s_below, prior_weight] + qu.pos_args
b_post = fn(*b_args, **fn_kwargs)
a_args = [s_above, prior_weight] + qu.pos_args
a_post = fn(*a_args, **fn_kwargs)
#print b_post
#print a_post
fn_lpdf = getattr(scope, a_post.name + '_lpdf')
print fn_lpdf
# calculate the llik of b_post under both distributions
a_kwargs = dict([(n, a) for n, a in a_post.named_args
if n not in ('rng', 'size')])
b_kwargs = dict([(n, a) for n, a in b_post.named_args
if n not in ('rng', 'size')])
below_llik = fn_lpdf(*([b_post] + b_post.pos_args), **b_kwargs)
above_llik = fn_lpdf(*([b_post] + a_post.pos_args), **a_kwargs)
new_node = scope.broadcast_best(b_post, below_llik, above_llik)
print '=' * 80
do_show = self.show_steps
for ii in range(2, 9):
if ii > len(idxs):
break
print '-' * 80
print 'ROUND', ii
print '-' * 80
all_vals = [2, 3, 4, 5, 6, 7, 8, 9, 10]
below, above = ap_filter_trials(idxs[:ii],
vals[:ii], idxs[:ii], losses[:ii], gamma)
below = below.astype('int')
above = above.astype('int')
print 'BB0', below
print 'BB1', above
#print 'BELOW', zip(range(100), np.bincount(below, minlength=11))
#print 'ABOVE', zip(range(100), np.bincount(above, minlength=11))
memo = {b_post: all_vals, s_below: below, s_above: above}
bl, al, nv = pyll.rec_eval([below_llik, above_llik, new_node],
memo=memo)
#print bl - al
print 'BB2', dict(zip(all_vals, bl - al))
print 'BB3', dict(zip(all_vals, bl))
print 'BB4', dict(zip(all_vals, al))
print 'ORIG PICKED', vals[ii]
print 'PROPER OPT PICKS:', nv
#assert np.allclose(below, [3, 3, 9])
#assert len(below) + len(above) == len(vals)
if do_show:
plt.subplot(8, 1, ii)
#plt.scatter(all_vals,
# np.bincount(below, minlength=11)[2:], c='b')
#plt.scatter(all_vals,
# np.bincount(above, minlength=11)[2:], c='c')
plt.scatter(all_vals, bl, c='g')
plt.scatter(all_vals, al, c='r')
if do_show:
plt.show()
def test4(self):
self.work(target=4, LEN=100)
def test2(self):
self.work(target=2, LEN=100)
def test6(self):
self.work(target=6, LEN=100)
def test10(self):
self.work(target=10, LEN=100)
| gpl-3.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/tests/series/test_combine_concat.py | 6 | 11854 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
from datetime import datetime
from numpy import nan
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, date_range, DatetimeIndex
from pandas import compat
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesCombine(TestData):
def test_append(self):
appendedSeries = self.series.append(self.objSeries)
for idx, value in compat.iteritems(appendedSeries):
if idx in self.series.index:
assert value == self.series[idx]
elif idx in self.objSeries.index:
assert value == self.objSeries[idx]
else:
self.fail("orphaned index!")
pytest.raises(ValueError, self.ts.append, self.ts,
verify_integrity=True)
def test_append_many(self):
pieces = [self.ts[:5], self.ts[5:10], self.ts[10:]]
result = pieces[0].append(pieces[1:])
assert_series_equal(result, self.ts)
def test_append_duplicates(self):
# GH 13677
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([4, 5, 6])
exp = pd.Series([1, 2, 3, 4, 5, 6], index=[0, 1, 2, 0, 1, 2])
tm.assert_series_equal(s1.append(s2), exp)
tm.assert_series_equal(pd.concat([s1, s2]), exp)
# the result must have RangeIndex
exp = pd.Series([1, 2, 3, 4, 5, 6])
tm.assert_series_equal(s1.append(s2, ignore_index=True),
exp, check_index_type=True)
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True),
exp, check_index_type=True)
msg = 'Indexes have overlapping values:'
with tm.assert_raises_regex(ValueError, msg):
s1.append(s2, verify_integrity=True)
with tm.assert_raises_regex(ValueError, msg):
pd.concat([s1, s2], verify_integrity=True)
def test_combine_first(self):
values = tm.makeIntIndex(20).values.astype(float)
series = Series(values, index=tm.makeIntIndex(20))
series_copy = series * 2
series_copy[::2] = np.NaN
# nothing used from the input
combined = series.combine_first(series_copy)
tm.assert_series_equal(combined, series)
# Holes filled from input
combined = series_copy.combine_first(series)
assert np.isfinite(combined).all()
tm.assert_series_equal(combined[::2], series[::2])
tm.assert_series_equal(combined[1::2], series_copy[1::2])
# mixed types
index = tm.makeStringIndex(20)
floats = Series(tm.randn(20), index=index)
strings = Series(tm.makeStringIndex(10), index=index[::2])
combined = strings.combine_first(floats)
tm.assert_series_equal(strings, combined.loc[index[::2]])
tm.assert_series_equal(floats[1::2].astype(object),
combined.loc[index[1::2]])
# corner case
s = Series([1., 2, 3], index=[0, 1, 2])
result = s.combine_first(Series([], index=[]))
assert_series_equal(s, result)
def test_update(self):
s = Series([1.5, nan, 3., 4., nan])
s2 = Series([nan, 3.5, nan, 5.])
s.update(s2)
expected = Series([1.5, 3.5, 3., 5., np.nan])
assert_series_equal(s, expected)
# GH 3217
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
# this will fail as long as series is a sub-class of ndarray
# df['c'].update(Series(['foo'],index=[0])) #####
def test_concat_empty_series_dtypes_roundtrips(self):
# round-tripping with self & like self
dtypes = map(np.dtype, ['float64', 'int8', 'uint8', 'bool', 'm8[ns]',
'M8[ns]'])
for dtype in dtypes:
assert pd.concat([Series(dtype=dtype)]).dtype == dtype
assert pd.concat([Series(dtype=dtype),
Series(dtype=dtype)]).dtype == dtype
def int_result_type(dtype, dtype2):
typs = set([dtype.kind, dtype2.kind])
if not len(typs - set(['i', 'u', 'b'])) and (dtype.kind == 'i' or
dtype2.kind == 'i'):
return 'i'
elif not len(typs - set(['u', 'b'])) and (dtype.kind == 'u' or
dtype2.kind == 'u'):
return 'u'
return None
def float_result_type(dtype, dtype2):
typs = set([dtype.kind, dtype2.kind])
if not len(typs - set(['f', 'i', 'u'])) and (dtype.kind == 'f' or
dtype2.kind == 'f'):
return 'f'
return None
def get_result_type(dtype, dtype2):
result = float_result_type(dtype, dtype2)
if result is not None:
return result
result = int_result_type(dtype, dtype2)
if result is not None:
return result
return 'O'
for dtype in dtypes:
for dtype2 in dtypes:
if dtype == dtype2:
continue
expected = get_result_type(dtype, dtype2)
result = pd.concat([Series(dtype=dtype), Series(dtype=dtype2)
]).dtype
assert result.kind == expected
def test_concat_empty_series_dtypes(self):
# booleans
assert pd.concat([Series(dtype=np.bool_),
Series(dtype=np.int32)]).dtype == np.int32
assert pd.concat([Series(dtype=np.bool_),
Series(dtype=np.float32)]).dtype == np.object_
# datetime-like
assert pd.concat([Series(dtype='m8[ns]'),
Series(dtype=np.bool)]).dtype == np.object_
assert pd.concat([Series(dtype='m8[ns]'),
Series(dtype=np.int64)]).dtype == np.object_
assert pd.concat([Series(dtype='M8[ns]'),
Series(dtype=np.bool)]).dtype == np.object_
assert pd.concat([Series(dtype='M8[ns]'),
Series(dtype=np.int64)]).dtype == np.object_
assert pd.concat([Series(dtype='M8[ns]'),
Series(dtype=np.bool_),
Series(dtype=np.int64)]).dtype == np.object_
# categorical
assert pd.concat([Series(dtype='category'),
Series(dtype='category')]).dtype == 'category'
assert pd.concat([Series(dtype='category'),
Series(dtype='float64')]).dtype == 'float64'
assert pd.concat([Series(dtype='category'),
Series(dtype='object')]).dtype == 'object'
# sparse
result = pd.concat([Series(dtype='float64').to_sparse(), Series(
dtype='float64').to_sparse()])
assert result.dtype == np.float64
assert result.ftype == 'float64:sparse'
result = pd.concat([Series(dtype='float64').to_sparse(), Series(
dtype='float64')])
assert result.dtype == np.float64
assert result.ftype == 'float64:sparse'
result = pd.concat([Series(dtype='float64').to_sparse(), Series(
dtype='object')])
assert result.dtype == np.object_
assert result.ftype == 'object:dense'
def test_combine_first_dt64(self):
from pandas.core.tools.datetimes import to_datetime
s0 = to_datetime(Series(["2010", np.NaN]))
s1 = to_datetime(Series([np.NaN, "2011"]))
rs = s0.combine_first(s1)
xp = to_datetime(Series(['2010', '2011']))
assert_series_equal(rs, xp)
s0 = to_datetime(Series(["2010", np.NaN]))
s1 = Series([np.NaN, "2011"])
rs = s0.combine_first(s1)
xp = Series([datetime(2010, 1, 1), '2011'])
assert_series_equal(rs, xp)
class TestTimeseries(object):
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
tm.assert_index_equal(result.index, ex_index)
tm.assert_index_equal(result_df.index, ex_index)
appended = rng.append(rng)
tm.assert_index_equal(appended, ex_index)
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
tm.assert_index_equal(appended, ex_index)
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
assert rng1.append(rng1).name == 'foo'
assert rng1.append(rng2).name is None
def test_append_concat_tz(self):
# GH 2938
tm._skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
tm.assert_index_equal(result.index, rng3)
tm.assert_index_equal(result_df.index, rng3)
appended = rng.append(rng2)
tm.assert_index_equal(appended, rng3)
def test_append_concat_tz_explicit_pytz(self):
# GH 2938
tm._skip_if_no_pytz()
from pytz import timezone as timezone
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz=timezone('US/Eastern'))
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz=timezone('US/Eastern'))
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz=timezone('US/Eastern'))
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
tm.assert_index_equal(result.index, rng3)
tm.assert_index_equal(result_df.index, rng3)
appended = rng.append(rng2)
tm.assert_index_equal(appended, rng3)
def test_append_concat_tz_dateutil(self):
# GH 2938
tm._skip_if_no_dateutil()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='dateutil/US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='dateutil/US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='dateutil/US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
tm.assert_index_equal(result.index, rng3)
tm.assert_index_equal(result_df.index, rng3)
appended = rng.append(rng2)
tm.assert_index_equal(appended, rng3)
| mit |
imaculate/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
adrn/GaiaPairsFollowup | scripts/solve_velocity.py | 1 | 11029 | # coding: utf-8
"""
"""
# Standard library
import os
from os import path
# Third-party
from astropy.constants import c
import astropy.units as u
import numpy as np
import matplotlib.pyplot as plt
import schwimmbad
from schwimmbad import choose_pool
# Project
from comoving_rv.log import logger
from comoving_rv.db import Session, db_connect
from comoving_rv.db.model import (SpectralLineMeasurement, SpectralLineInfo,
Run, Observation, PriorRV, RVMeasurement)
class RVCorrector(object):
def __init__(self, session, run_name):
self.session = session
self.run_name = str(run_name)
# get wavelength for Halpha
self.Halpha, = session.query(SpectralLineInfo.wavelength)\
.filter(SpectralLineInfo.name == 'Halpha').one()
self._compute_offset_corrections()
def _compute_offset_corrections(self):
session = self.session
run_name = self.run_name
q = session.query(Observation).join(Run, SpectralLineMeasurement, PriorRV)
q = q.filter(Run.name == run_name)
q = q.filter(SpectralLineMeasurement.x0 != None)
q = q.filter(PriorRV.rv != None)
logger.debug('{0} observations with prior RV measurements'
.format(q.distinct().count()))
# retrieve all observations with measured centroids and previous RV's
observations = q.all()
# What we do below is look at the residual offsets between applying a naïve
# sky-line correction and the true RV (with the barycentric velocity
# applied)
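        # (Wavelength offsets are converted to velocities below with the
        # non-relativistic Doppler relation v = c * dlambda / lambda_Halpha.)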
raw_offsets = np.zeros(len(observations)) * u.angstrom
all_sky_offsets = np.full((len(observations), 3), np.nan) * u.angstrom
true_rv = np.zeros(len(observations)) * u.km/u.s
obs_time = np.zeros(len(observations))
night_id = np.zeros(len(observations), dtype=int)
corrected_rv = np.zeros(len(observations)) * u.km/u.s
for i,obs in enumerate(observations):
# convert obstime into decimal hour
            obs_time[i] = np.sum(
                np.array(list(map(float, obs.time_obs.split(':'))))
                / np.array([1., 60., 3600.]))
# Compute the raw offset: difference between Halpha centroid and true
# wavelength value
x0 = obs.measurements[0].x0 * u.angstrom
offset = (x0 - self.Halpha)
raw_offsets[i] = offset
night_id[i] = obs.night
# For each sky line (that passes certain quality checks), compute the
# offset between the predicted wavelength and measured centroid
# TODO: generalize these quality cuts - see also below in
# get_corrected_rv
sky_offsets = []
for j,meas in enumerate(obs.measurements[1:]):
sky_offset = meas.x0*u.angstrom - meas.info.wavelength
if (meas.amp > 16 and meas.std_G < 2 and meas.std_G > 0.3 and
np.abs(sky_offset) < 4*u.angstrom): # MAGIC NUMBER: quality cuts
sky_offsets.append(sky_offset)
all_sky_offsets[i,j] = sky_offset
sky_offsets = u.Quantity(sky_offsets)
if len(sky_offsets) > 0:
sky_offset = np.mean(sky_offsets)
else:
sky_offset = np.nan * u.angstrom
logger.debug("not correcting with sky line for {0}".format(obs))
true_rv[i] = obs.prior_rv.rv - obs.v_bary
raw_rv = raw_offsets / self.Halpha * c.to(u.km/u.s)
# unique night ID's
unq_night_id = np.unique(night_id)
unq_night_id.sort()
# Now we do a totally insane thing. From visualizing the residual
# differences, there seems to be a trend with the observation time. We
# fit a line to these residuals and use this to further correct the
# wavelength solutions using just the (strongest) [OI] 5577 Å line.
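        # Concretely: for each night, fit residual ~ a * t_obs + b; the fitted
        # value is later subtracted from the mean sky-line offset before the
        # Halpha offset is converted to a velocity.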
diff = all_sky_offsets[:,0] - ((raw_rv - true_rv)/c*5577*u.angstrom).decompose()
diff[np.abs(diff) > 2*u.angstrom] = np.nan * u.angstrom # reject BIG offsets
self._night_polys = dict()
self._night_final_offsets = dict()
for n in unq_night_id:
mask = (night_id == n) & np.isfinite(diff)
coef = np.polyfit(obs_time[mask], diff[mask], deg=1, w=np.full(mask.sum(), 1/0.1))
poly = np.poly1d(coef)
self._night_polys[n] = poly
sky_offset = np.nanmean(all_sky_offsets[mask,:2], axis=1)
sky_offset[np.isnan(sky_offset)] = 0.*u.angstrom
sky_offset -= self._night_polys[n](obs_time[mask]) * u.angstrom
corrected_rv[mask] = (raw_offsets[mask] - sky_offset) / self.Halpha * c.to(u.km/u.s)
# Finally, we align the median of each night's ∆RV distribution with 0
drv = corrected_rv[mask] - true_rv[mask]
self._night_final_offsets[n] = np.nanmedian(drv)
# now estimate the std. dev. uncertainty using the MAD
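        # (1.5 approximates the 1.4826 factor that makes the MAD a consistent
        # estimator of the standard deviation for Gaussian errors)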
all_drv = corrected_rv - true_rv
self._abs_err = 1.5 * np.nanmedian(np.abs(all_drv - np.nanmedian(all_drv)))
def get_corrected_rv(self, obs):
"""Compute a corrected radial velocity for the given observation"""
# Compute the raw offset: difference between Halpha centroid and true
# wavelength value
x0 = obs.measurements[0].x0 * u.angstrom
raw_offset = (x0 - self.Halpha)
# precision estimate from line centroid error
precision = (obs.measurements[0].x0_error * u.angstrom) / self.Halpha * c.to(u.km/u.s)
# For each sky line (that passes certain quality checks), compute the
# offset between the predicted wavelength and measured centroid
# TODO: generalize these quality cuts - see also above in
# _compute_offset_corrections
sky_offsets = np.full(3, np.nan) * u.angstrom
for j,meas in enumerate(obs.measurements[1:]):
sky_offset = meas.x0*u.angstrom - meas.info.wavelength
if (meas.amp > 16 and meas.std_G < 2 and meas.std_G > 0.3 and
np.abs(sky_offset) < 3.3*u.angstrom): # MAGIC NUMBER: quality cuts
sky_offsets[j] = sky_offset
# final sky offset to apply
flag = 0
sky_offset = np.nanmean(sky_offsets)
if np.isnan(sky_offset.value):
logger.debug("not correcting with sky line for {0}".format(obs))
sky_offset = 0*u.angstrom
flag = 1
# apply global sky offset correction - see _compute_offset_corrections()
sky_offset -= self._night_polys[obs.night](obs.utc_hour) * u.angstrom
# compute radial velocity and correct for sky line
rv = (raw_offset - sky_offset) / self.Halpha * c.to(u.km/u.s)
# correct for offset of median of ∆RV distribution from targets with
# prior/known RV's
rv -= self._night_final_offsets[obs.night]
# rv error
err = np.sqrt(self._abs_err**2 + precision**2)
return rv, err, flag
def main(db_path, run_name, overwrite=False, pool=None):
if pool is None:
pool = schwimmbad.SerialPool()
# connect to the database
engine = db_connect(db_path)
# engine.echo = True
logger.debug("Connected to database at '{}'".format(db_path))
# create a new session for interacting with the database
session = Session()
root_path, _ = path.split(db_path)
plot_path = path.join(root_path, 'plots', run_name)
if not path.exists(plot_path):
os.makedirs(plot_path, exist_ok=True)
# get object to correct the observed RV's
rv_corr = RVCorrector(session, run_name)
observations = session.query(Observation).join(Run)\
.filter(Run.name == run_name).all()
for obs in observations:
q = session.query(RVMeasurement).join(Observation)\
.filter(Observation.id == obs.id)
if q.count() > 0 and not overwrite:
logger.debug('RV measurement already complete for object '
'{0} in file {1}'.format(obs.object, obs.filename_raw))
continue
elif q.count() > 1:
raise RuntimeError('Multiple RV measurements found for object {0}'
.format(obs))
elif len(obs.measurements) == 0:
logger.debug('Observation {0} has no line measurements.'
.format(obs))
continue
corrected_rv, err, flag = rv_corr.get_corrected_rv(obs)
# remove previous RV measurements
if q.count() > 0:
session.delete(q.one())
session.commit()
rv_meas = RVMeasurement(rv=corrected_rv, err=err, flag=flag)
rv_meas.observation = obs
session.add(rv_meas)
session.commit()
pool.close()
if __name__ == "__main__":
from argparse import ArgumentParser
import logging
# Define parser object
parser = ArgumentParser(description="")
vq_group = parser.add_mutually_exclusive_group()
vq_group.add_argument('-v', '--verbose', action='count',
default=0, dest='verbosity')
vq_group.add_argument('-q', '--quiet', action='count',
default=0, dest='quietness')
parser.add_argument('-s', '--seed', dest='seed', default=None,
type=int, help='Random number generator seed.')
parser.add_argument('-o', '--overwrite', action='store_true',
dest='overwrite', default=False,
help='Destroy everything.')
parser.add_argument('-d', '--db', dest='db_path', required=True,
help='Path to sqlite database file')
parser.add_argument('-r', '--run', dest='run_name', required=True,
help='Name of the observing run')
# multiprocessing options
group = parser.add_mutually_exclusive_group()
group.add_argument('--ncores', dest='n_cores', default=1,
type=int, help='Number of CPU cores to use.')
group.add_argument('--mpi', dest='mpi', default=False,
action='store_true', help='Run with MPI.')
args = parser.parse_args()
# Set logger level based on verbose flags
if args.verbosity != 0:
if args.verbosity == 1:
logger.setLevel(logging.DEBUG)
else: # anything >= 2
logger.setLevel(1)
elif args.quietness != 0:
if args.quietness == 1:
logger.setLevel(logging.WARNING)
else: # anything >= 2
logger.setLevel(logging.ERROR)
else: # default
logger.setLevel(logging.INFO)
if args.seed is not None:
np.random.seed(args.seed)
pool = choose_pool(mpi=args.mpi, processes=args.n_cores)
logger.info("Using pool: {}".format(pool.__class__))
main(db_path=args.db_path, run_name=args.run_name,
overwrite=args.overwrite, pool=pool)
| mit |
jkarnows/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
suyashbire1/pyhton_scripts_mom6 | plot_pv.py | 1 | 2649 | import sys
import readParams_moreoptions as rdp1
import matplotlib.pyplot as plt
from mom_plot1 import m6plot
import numpy as np
from netCDF4 import MFDataset as mfdset, Dataset as dset
import time
from getvaratz import getvaratz
def extract_twapv(geofil,vgeofil,fil,fil2,xstart,xend,ystart,yend,
zs,ze,meanax,savfil=None, cmaxscalefactor=None,
plotatz=False,Z=None):
keepax = ()
for i in range(4):
if i not in meanax:
keepax += (i,)
fhgeo = dset(geofil)
fh = mfdset(fil)
fh2 = mfdset(fil2)
zi = rdp1.getdims(fh)[2][0]
dbl = np.diff(zi)*9.8/1031
(xs,xe),(ys,ye),dimq = rdp1.getlatlonindx(fh,wlon=xstart,elon=xend,
slat=ystart, nlat=yend,zs=zs,ze=ze,xhxq='xq',yhyq='yq')
dxbu = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye)[2][4]
dybu = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye)[2][5]
dycu = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye+1)[2][1]
dxcv = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye)[2][2]
f = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye)[-1]
nt_const = dimq[0].size
fhgeo.close()
em = fh.variables['e'][0:,zs:ze,ys:ye,xs:xe]
elm = 0.5*(em[:,0:-1,:,:]+em[:,1:,:,:])
uh = fh.variables['uh'][0:,zs:ze,ys:ye+1,xs:xe]
h_cu = fh2.variables['h_Cu'][0:,zs:ze,ys:ye+1,xs:xe]
utwa = uh/h_cu/dycu
h_cu = np.where(h_cu>1e-3,h_cu,np.nan)
vh = fh.variables['vh'][0:,zs:ze,ys:ye,xs:xe]
h_cv = fh2.variables['h_Cv'][0:,zs:ze,ys:ye,xs:xe]
vtwa = vh/h_cv/dxcv
vtwa = np.concatenate((vtwa,-vtwa[:,:,:,-1:]),axis=3)
h_cv = np.concatenate((h_cv,-h_cv[:,:,:,-1:]),axis=3)
h_cv = np.where(h_cv>1e-3,h_cv,np.nan)
fh2.close()
fh.close()
hq = 0.25*(h_cu[:,:,:-1,:] + h_cv[:,:,:,:-1] +
h_cu[:,:,1:,:] + h_cv[:,:,:,1:])
    pv = f - np.diff(utwa,axis=2)/dybu + np.diff(vtwa,axis=3)/dxbu
pv = pv/(hq/dbl[:,np.newaxis,np.newaxis])
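    # i.e. a layer-wise shallow-water potential vorticity: (f + relative
    # vorticity) times the reduced-gravity interval of the layer (dbl),
    # divided by the layer thickness hq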
X = dimq[keepax[1]]
Y = dimq[keepax[0]]
if 1 in keepax:
em = np.ma.apply_over_axes(np.mean, em, meanax)
elm = np.ma.apply_over_axes(np.mean, elm, meanax)
Y = elm.squeeze()
X = np.meshgrid(X,dimq[1])[0]
if plotatz:
pv = getvaratz(pv,Z,em)
pv = np.ma.apply_over_axes(np.nanmean, pv, meanax)
pv = pv.squeeze()
cmax = np.nanmax(np.absolute(pv))*cmaxscalefactor
im = m6plot((X,Y,pv), xlabel=r'x ($^{\circ}$ E)',ylabel=r'y ($^{\circ}$ N)',
vmin=6e-10,vmax=cmax,aspect='equal',bvnorm=True)
if savfil:
plt.savefig(savfil+'.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
plt.show()
| gpl-3.0 |
mattgiguere/scikit-learn | examples/linear_model/plot_ard.py | 248 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
matmodlab/matmodlab2 | matmodlab2/core/database.py | 1 | 15424 | import os
import re
import datetime
import numpy as np
from scipy.io.netcdf import NetCDFFile
COMPONENT_SEP = '.'
def read_exodb(filename):
db = DatabaseFileReader(filename)
return db.df
def read_npzdb(filename):
from pandas import DataFrame
f = np.load(filename)
return DataFrame(f['data'], columns=f['columns'])
def read_db(filename):
print(filename)
if filename.endswith('npz'):
return read_npzdb(filename)
elif filename.endswith('exo'):
db = DatabaseFileReader(filename)
return db.df
elif filename.endswith(('dat', 'txt')):
from pandas import read_table
return read_table(filename, sep='\s+')
else:
raise ValueError('Unknown file extension')
def cat(*args):
return ''.join(str(a).strip() for a in args)
def adjstr(string):
return '{0:33s}'.format(string)[:33]
def stringify2(a):
stringified = []
for row in a:
try:
item = ''.join(row)
except TypeError:
item = b''.join(row).decode('utf-8')
stringified.append(item.strip())
return stringified
def DatabaseFile(jobid, mode='r'):
"""The database file factory method"""
if mode not in 'rw':
raise ValueError('Mode must be r or w')
exts = ('.exo', '.gen', '.base_exo', '.e', '.g', '.npz')
if jobid.endswith(exts):
filename = jobid
jobid = os.path.splitext(os.path.basename(jobid))[0]
else:
filename = jobid + exts[0]
if not filename.endswith(exts):
raise ValueError('Not a valid ExodusII file extension')
if mode == 'w':
if not filename.endswith(exts[:-1]):
raise ValueError('DatabaseWriter is only for exodus files')
return DatabaseFileWriter(jobid, filename)
if filename.endswith('.npz'):
from pandas import DataFrame
f = np.load(filename)
df = DataFrame(f['data'], columns=f['columns'])
else:
db = DatabaseFileReader(filename)
db.fh.close()
df = db.df
return df
def groupby_names(names, sep=None, disp=0):
return _DatabaseFile.groupby_names(names, sep=sep, disp=disp)
class _DatabaseFile(object):
coordx = np.array([-0.5, 0.5, 0.5, -0.5, -0.5, 0.5, 0.5, -0.5]) + .5
coordy = np.array([-0.5, -0.5, 0.5, 0.5, -0.5, -0.5, 0.5, 0.5]) + .5
coordz = np.array([-0.5, -0.5, -0.5, -0.5, 0.5, 0.5, 0.5, 0.5]) + .5
def get_elem_var_names(self):
return stringify2(self.fh.variables['name_elem_var'].data)
def get_glo_var_names(self):
return stringify2(self.fh.variables['name_glo_var'].data)
def get_qa_records(self):
return stringify2(self.fh.variables['qa_records'].data[0])
def get_jobid(self):
qa_records = self.get_qa_records()
return qa_records[1]
@staticmethod
def groupby_names(names, sep=None, disp=0):
"""Group variables by name
Returns
-------
names_and_cols : dict
Dictionary with name:column number[s] pairs
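        Examples
        --------
        A small sketch (illustrative names only); symmetric-tensor components
        come back in XX, YY, ZZ, XY, YZ, XZ order, as labels by default or as
        column indices when ``disp`` is nonzero:
        >>> groupby_names(['Time', 'S.XX', 'S.YY', 'S.ZZ',
        ...                'S.XY', 'S.YZ', 'S.XZ'])
        {'Time': 0, 'S': ['XX', 'YY', 'ZZ', 'XY', 'YZ', 'XZ']}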
"""
# Mappings from component name to number
stc = dict(zip(('XX', 'YY', 'ZZ', 'XY', 'YZ', 'XZ'), range(6)))
tc = dict(zip(('XX','XY','XZ','YX','YY','YZ','ZX','ZY','ZZ'), range(9)))
vc = dict(zip(('X', 'Y', 'Z'), range(3)))
sep = sep or COMPONENT_SEP
names_and_components = {}
for (i, name) in enumerate(names):
try:
name, x = name.rsplit(sep, 1)
names_and_components.setdefault(name, []).append((i, x))
except ValueError:
names_and_components.setdefault(name, []).append((i, None))
names_and_cols = {}
for (name, components) in names_and_components.items():
if len(components) == 1 and components[0][1] is None:
names_and_cols[name] = components[0][0]
continue
if len(components) == 3:
components = sorted(components, key=lambda x: vc[x[1]])
elif len(components) == 6:
components = sorted(components, key=lambda x: stc[x[1]])
elif len(components) == 9:
components = sorted(components, key=lambda x: tc[x[1]])
else:
raise TypeError('Unknown components for {0!r}'.format(name))
if disp:
names_and_cols[name] = [x[0] for x in components]
else:
names_and_cols[name] = [x[1] for x in components]
return names_and_cols
class DatabaseFileWriter(_DatabaseFile):
mode = 'w'
def __init__(self, jobid, filename):
'''
Notes
-----
The EXOFile class is an interface to the Exodus II api. Its methods
are named after the analogous method from the Exodus II C bindings,
minus the prefix 'ex_'.
'''
self.jobid = jobid
self.filename = filename
self.fh = NetCDFFile(filename, mode='w')
def initialize(self, glo_var_names, elem_var_names):
"""Initialize the output database
Parameters
----------
glo_var_names : list of string
elem_var_names : list of string
"""
# ------------------------------------------------------------------- #
# -------------------------------- standard ExodusII dimensioning --- #
# ------------------------------------------------------------------- #
self.fh.floating_point_word_size = 4
self.fh.version = 5.0300002
self.fh.file_size = 1
self.fh.api_version = 5.0300002
self.fh.title = 'Matmodlab material point simulation'
self.fh.filename = os.path.basename(self.filename)
self.fh.jobid = self.jobid
self.fh.createDimension('time_step', None)
self.fh.createDimension('len_string', 33)
self.fh.createDimension('len_line', 81)
self.fh.createDimension('four', 4)
self.fh.createDimension('num_dim', 3)
self.fh.createDimension('num_nodes', 8)
self.fh.createDimension('num_elem', 1)
# ------------------------------------------------------------------- #
# ---------------------------------------------------- QA records --- #
# ------------------------------------------------------------------- #
now = datetime.datetime.now()
day = now.strftime("%m/%d/%y")
hour = now.strftime("%H:%M:%S")
self.fh.createDimension('num_qa_rec', 1)
self.fh.createVariable('qa_records', 'c',
('num_qa_rec', 'four', 'len_string'))
self.fh.variables['qa_records'][0, 0, :] = adjstr('Matmodlab')
self.fh.variables['qa_records'][0, 1, :] = adjstr(self.jobid)
self.fh.variables['qa_records'][0, 2, :] = adjstr(day)
self.fh.variables['qa_records'][0, 3, :] = adjstr(hour)
# ------------------------------------------------------------------- #
# ------------------------------------------------- record arrays --- #
# ------------------------------------------------------------------- #
self.fh.createVariable('time_whole', 'f', ('time_step',))
# ------------------------------------------------------------------- #
# --------------------------------------- element block meta data --- #
# ------------------------------------------------------------------- #
# block IDs - standard map
self.fh.createDimension('num_el_blk', 1)
self.fh.createVariable('eb_prop1', 'i', ('num_el_blk',))
self.fh.variables['eb_prop1'][:] = np.arange(1, dtype=np.int32)+1
self.fh.variables['eb_prop1'].name = 'ID'
self.fh.createVariable('eb_status', 'i', ('num_el_blk',))
self.fh.variables['eb_status'][:] = np.ones(1, dtype=int)
self.fh.createVariable('eb_names', 'c', ('num_el_blk', 'len_string'))
self.fh.variables['eb_names'][0][:] = adjstr('ElementBlock1')
# element map
self.fh.createDimension('num_el_in_blk1', 1)
self.fh.createDimension('num_nod_per_el1', 8)
self.fh.createVariable('elem_map1', 'i', ('num_elem',))
self.fh.variables['elem_map1'][:] = np.arange(1, dtype=np.int32)+1
# set up the element block connectivity
dim = ('num_el_in_blk1', 'num_nod_per_el1')
self.fh.createVariable('connect1', 'i', dim)
self.fh.variables['connect1'][:] = np.arange(8, dtype=np.int32)+1
self.fh.variables['connect1'].elem_type = 'HEX'
# ------------------------------------------------------------------- #
# -------------------------------------------------- Element data --- #
# ------------------------------------------------------------------- #
num_elem_var = len(elem_var_names)
self.fh.createDimension('num_elem_var', num_elem_var)
dim = ('num_elem_var', 'len_string')
self.fh.createVariable('name_elem_var', 'c', dim)
for (i, name) in enumerate(elem_var_names):
key = adjstr(name)
self.fh.variables['name_elem_var'][i, :] = key
self.fh.createVariable('vals_elem_var{0}eb1'.format(i+1),
'f', ('time_step', 'num_el_in_blk1'))
self.fh.createVariable('elem_var_tab', 'i', ('num_elem_var',))
elem_var_tab = np.ones(num_elem_var, dtype=np.int32)
self.fh.variables['elem_var_tab'][:] = elem_var_tab
# ------------------------------------------------------------------- #
# ----------------------------------------------------- Node data --- #
# ------------------------------------------------------------------- #
vertices = [self.coordx, self.coordy, self.coordz]
self.fh.createVariable('coor_names', 'c', ('num_dim', 'len_string'))
for i in range(3):
key = 'coord' + 'xyz'[i]
self.fh.variables['coor_names'][i][:] = adjstr(key)
self.fh.createVariable(key, 'f', ('num_nodes',))
self.fh.variables[key][:] = vertices[i]
self.fh.createDimension('num_nod_var', 3)
dim = ('num_nod_var', 'len_string')
self.fh.createVariable('name_nod_var', 'c', dim)
for i in range(3):
key = 'displ' + 'xyz'[i]
self.fh.variables['name_nod_var'][i, :] = adjstr(key)
self.fh.createVariable('vals_nod_var{0}'.format(i+1), 'f',
('time_step', 'num_nodes'))
# ------------------------------------------------------------------- #
# ---------------------------------------------- Global variables --- #
# ------------------------------------------------------------------- #
self.fh.createDimension('num_glo_var', len(glo_var_names))
dim = ('num_glo_var', 'len_string')
self.fh.createVariable('name_glo_var', 'c', dim)
for (i, key) in enumerate(glo_var_names):
self.fh.variables['name_glo_var'][i, :] = adjstr(key)
self.fh.createVariable('vals_glo_var', 'f', ('time_step', 'num_glo_var'))
self.initialized = True
return
def update_displ(self, elem_var_vals):
"""Update the node positions based on the deformation gradient"""
elem_var_names = self.get_elem_var_names()
names_and_cols = self.groupby_names(elem_var_names, disp=1)
cols = names_and_cols['F']
F = np.array(elem_var_vals[cols]).reshape((3,3))
displ = []
for i in range(8):
X = np.array([self.coordx[i], self.coordy[i], self.coordz[i]])
x = np.dot(F, X)
displ.append(x)
displ = np.array(displ).T
return displ
def save(self, time, glo_var_vals, elem_var_vals):
"""Save the step information to the database file
Parameters
----------
time : float
Time at end of increment
glo_var_vals : ndarray
            Global variable values, in the same order as they were put into
            the database
elem_var_vals : ndarray
            Element variable values, in the same order as they were put into
            the database
"""
# write time value
count = len(self.fh.variables['time_whole'].data)
self.fh.variables['time_whole'][count] = time
self.fh.variables['vals_glo_var'][count] = glo_var_vals
# get node and element fields
elem_var_names = self.get_elem_var_names()
if len(elem_var_names) != len(elem_var_vals):
l1, l2 = len(elem_var_names), len(elem_var_vals)
message = 'Expected {0} sdv got {1}'.format(l1, l2)
raise RuntimeError(message)
for (i, elem_var_val) in enumerate(elem_var_vals):
key = 'vals_elem_var{0}eb1'.format(i+1)
self.fh.variables[key][count] = elem_var_val
nod_var_vals = self.update_displ(elem_var_vals)
assert nod_var_vals.shape == (3, 8)
for (i, nod_var_val) in enumerate(nod_var_vals):
key = 'vals_nod_var{0}'.format(i+1)
self.fh.variables[key][count] = nod_var_val
return
def close(self):
"""Close the database file"""
self.fh.close()
class DatabaseFileReader(_DatabaseFile):
def __init__(self, filename):
self.filename = filename
if not os.path.isfile(self.filename):
raise OSError('{0}: no such file'.format(self.filename))
self.fh = NetCDFFile(self.filename, 'r')
self.df = self.read_db()
self.jobid = self.get_jobid()
self.fh.close()
def read_db(self, blk_num=1, elem_num=1):
"""Read the ExodusII database file filename.
Parameters
----------
blk_num : int
The element block number to read
elem_num : int
The element number to read
Returns
-------
df : pandas.DataFrame
A DataFrame with names of variables as columns
Notes
-----
This is a single element reader
"""
from pandas import DataFrame
fh = self.fh
# global/element vars and mapping
num_glo_var = fh.dimensions.get('num_glo_var', 0)
if num_glo_var:
name_glo_var = self.get_glo_var_names()
gmap = dict(zip(name_glo_var, range(len(name_glo_var))))
name_elem_var = self.get_elem_var_names()
emap = dict(zip(name_elem_var, range(len(name_elem_var))))
# retrieve the data from the database
head = ['Time']
if num_glo_var:
head.extend([key for key in name_glo_var])
head.extend([key for key in name_elem_var])
data = []
times = fh.variables['time_whole'].data.flatten()
for (i, time) in enumerate(times):
row = [time]
if num_glo_var:
vals_glo_var = fh.variables['vals_glo_var'].data[i]
for var in name_glo_var:
var_num = gmap[var]
row.append(vals_glo_var[var_num])
for var in name_elem_var:
var_num = emap[var]+1
                name = 'vals_elem_var{0}eb{1}'.format(var_num, blk_num)
row.append(fh.variables[name].data[i, elem_num-1])
data.append(row)
data = np.asarray(data)
if len(head) != data.shape[1]:
raise ValueError('inconsistent data')
return DataFrame(data, columns=head)
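# Hypothetical usage sketch (the filename below is made up, not part of this
# module):
#
#   db = DatabaseFileReader('job.exo')
#   df = db.df             # pandas.DataFrame, one row per stored time step
#   df['Time']             # plus one column per global/element variable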
| bsd-3-clause |
ameya30/IMaX_pole_data_scripts | my_scripts/noise_test.py | 1 | 1663 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 20 14:55:38 2017
@author: prabhu
"""
from scipy.io import readsav
import numpy as np
#from matplotlib import pyplot as plt
from astropy.io import fits
unbin = readsav("/home/prabhu/sunrise_holly/movie_data/mk_magneto_tr2_reduc_rnr_300_21.sav")
bind = fits.open("/home/prabhu/sunrise_holly/binned_cycles/binned_tr2_mk_restor_300_21_2.fits")
#for bi=2
y1,y2,x1,x2 = 120,170,125,400
#for bi=3
#y1,y2,x1,x2 = 125,145,130,190
#for bi=4
#y1,y2,x1,x2 = 80,105,110,150
#not restored
qcont_unbin_nr = unbin['iidn'][1,4,:,:]
icont_unbin_nr = unbin['iidn'][0,4,:,:]
qcont_bin_nr = bind[1].data[1,4,:,:]
icont_bin_nr = bind[1].data[0,4,:,:]
#fig = plt.figure(figsize=(12,12))
#ax = plt.axes()
#im = plt.imshow(qcont_unbin_nr,cmap='gray')
#plt.show()
#plt.suptitle('qcont')
#fig1 = plt.figure(figsize=(12,12))
#ax1 = plt.axes()
#im1 = plt.imshow(icont_unbin_nr,cmap='gray')
#plt.show()
#plt.suptitle('icont')
nqcont_unbin_nr = qcont_unbin_nr/icont_unbin_nr
nqcont_bin_nr = qcont_bin_nr/icont_bin_nr
std_unbin_nr = np.std(nqcont_unbin_nr[286:336,136:382])
std_bin_nr = np.std(nqcont_bin_nr[y1:y2,x1:x2])
#restored
qcont_unbin_r = unbin['iid'][1,4,:,:]
icont_unbin_r = unbin['iid'][0,4,:,:]
qcont_bin_r = bind[0].data[1,4,:,:]
icont_bin_r = bind[0].data[0,4,:,:]
nqcont_unbin_r = qcont_unbin_r/icont_unbin_r
nqcont_bin_r = qcont_bin_r/icont_bin_r
std_unbin_r = np.std(nqcont_unbin_r[286:336,136:382])
std_bin_r = np.std(nqcont_bin_r[y1:y2,x1:x2])
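# Editorial note (not from the original script): for uncorrelated noise,
# b x b spatial binning should lower the standard deviation by roughly a
# factor of b, so the ratios printed below are expected to be close to the
# binning factor.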
print("the ratio of nonrestored unbin/bin")
print(std_unbin_nr/std_bin_nr)
print("the ratio of restored unbin/bin")
print(std_unbin_r/std_bin_r) | mit |
ppries/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py | 30 | 2249 | # encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
class CategoricalTest(tf.test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=0,
share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"], ["3", "Male"]
])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
sixy6e/zonal-stats | workflow/classifier_workflow.py | 1 | 8773 | #!/usr/bin/env python
import luigi
import os
from os.path import join as pjoin, exists, dirname
import cPickle as pickle
import glob
import argparse
import logging
import pandas
import rasterio
from datacube.api.model import DatasetType
from classifier import classifier
from zonal_stats import zonal_stats
from zonal_stats import zonal_class_distribution
from image_processing.segmentation import rasterise_vector
CONFIG = luigi.configuration.get_config()
CONFIG.add_config_path(pjoin(dirname(__file__), 'config.cfg'))
class RasteriseTask(luigi.Task):
"""
Computes the rasterisation for a cell.
"""
out_dir = luigi.Parameter()
def requires(self):
return []
def output(self):
out_fname = pjoin(self.out_dir, 'RasteriseTask.completed')
return luigi.LocalTarget(out_fname)
def run(self):
out_fname = pjoin(self.out_dir,
CONFIG.get('outputs', 'rasterise_filename'))
ds_list_fname = pjoin(self.out_dir,
CONFIG.get('outputs', 'query_filename'))
with open(ds_list_fname, 'r') as infile:
ds_list = pickle.load(infile)
vector_fname = CONFIG.get('work', 'vector_filename')
img_fname = ds_list[0].datasets[DatasetType.FC25].path
with rasterio.open(img_fname) as src:
crs = src.crs
transform = src.affine
height = src.height
width = src.width
res = rasterise_vector(vector_fname, shape=(height, width),
transform=transform, crs=crs)
kwargs = {'count': 1,
'width': width,
'height': height,
'crs': crs,
'transform': transform,
'dtype': res.dtype.name,
'nodata': 0}
with rasterio.open(out_fname, 'w', **kwargs) as src:
src.write(1, res)
# We could just set the image as the Luigi completion target...
with self.output().open('w') as outf:
outf.write('Complete')
class ClassifierStatsTask(luigi.Task):
"""
Computes a zonal class distribution task for the required dataset.
"""
idx = luigi.IntParameter()
out_fname = luigi.Parameter()
def requires(self):
return [RasteriseTask(dirname(self.out_fname))]
def output(self):
return luigi.LocalTarget(self.out_fname)
def run(self):
rasterised_fname = pjoin(dirname(self.out_fname),
CONFIG.get('outputs', 'rasterise_filename'))
ds_list_fname = pjoin(dirname(self.out_fname),
CONFIG.get('outputs', 'query_filename'))
with open(ds_list_fname, 'r') as infile:
ds_list = pickle.load(infile)
dataset = ds_list[self.idx]
nbar_ds = dataset.datasets[DatasetType.ARG25]
pq_ds = dataset.datasets[DatasetType.PQ25]
classified_img = classifier(nbar_ds, pq_ds)
# hard code; as this will be short lived due to agdc-v2 development
class_ids = [0, 1, 2, 3, 4, 5]
with rasterio.open(rasterised_fname, 'r') as src:
zones_img = src.read(1)
result = zonal_class_distribution(classified_img, zones_img,
class_ids=class_ids)
# Set the timestamp
result['Timestamp'] = dataset.start_datetime
# Open the output hdf5 file
store = pandas.HDFStore(self.output().path)
# Write the dataframe
store.append('data', result)
# Save and close the file
store.close()
class CellStatsTask(luigi.Task):
"""
For a given cell define a classifier stats task for each required Dataset.
"""
out_dir = luigi.Parameter()
def requires(self):
base_name = CONFIG.get('outputs', 'stats_filename_format')
base_name = pjoin(self.out_dir, base_name)
ds_list_fname = pjoin(self.out_dir,
CONFIG.get('outputs', 'query_filename'))
with open(ds_list_fname, 'r') as infile:
ds_list = pickle.load(infile)
targets = []
for idx, ds in enumerate(ds_list):
timestamp = bytes(ds.start_datetime).replace(' ', '-')
out_fname = base_name.format(timestamp)
targets.append(ClassifierStatsTask(idx, out_fname))
return targets
def output(self):
out_fname = pjoin(self.out_dir, 'CellStatsTask.completed')
return luigi.LocalTarget(out_fname)
def run(self):
with self.output().open('w') as outf:
outf.write('Completed')
class CombineCellStatsTask(luigi.Task):
"""
Combines all stats files from a single cell into a single file.
"""
out_dir = luigi.Parameter()
def requires(self):
return [CellStatsTask(self.out_dir)]
def output(self):
out_fname = pjoin(self.out_dir, 'CombineCellStatsTask.completed')
return luigi.LocalTarget(out_fname)
def run(self):
# Get a list of the stats files for each timeslice
stats_files_list = glob.glob(pjoin(self.out_dir, '*.h5'))
# Create an output file that we can continually append data
out_fname = pjoin(self.out_dir,
CONFIG.get('outputs',
'combined_cell_stats_filename'))
combined_store = pandas.HDFStore(out_fname)
store = pandas.HDFStore(stats_files_list[0])
# If there is nothing in the first file there will be nothing for
# every file
if '/data' in store.keys():
# We have data to retrieve
headings = store['data'].columns.tolist()
store.close()
df = pandas.DataFrame(columns=headings)
for sfile in stats_files_list:
store = pandas.HDFStore(sfile, 'r')
df = df.append(store['data'])
store.close()
df.reset_index(inplace=True)
# Write to disk
combined_store.append('data', df)
with self.output().open('w') as outf:
outf.write('Completed')
class RunCombineCellStatsTasks(luigi.Task):
"""
Issues CombineCellStatsTask's to each cell associated
with the tile defined by the start and end index.
"""
idx1 = luigi.IntParameter()
idx2 = luigi.IntParameter()
def requires(self):
base_out_dir = CONFIG.get('work', 'output_directory')
cells_list_fname = pjoin(base_out_dir,
CONFIG.get('outputs', 'cells_list'))
with open(cells_list_fname, 'r') as infile:
cells = pickle.load(infile)
tasks = []
for cell in cells[self.idx1:self.idx2]:
out_dir = pjoin(base_out_dir, cell)
tasks.append(CombineCellStatsTask(out_dir))
return tasks
def output(self):
out_dir = CONFIG.get('work', 'output_directory')
out_fname = pjoin(out_dir,
'RunCombineCellStatsTasks_{}:{}.completed')
out_fname = out_fname.format(self.idx1, self.idx2)
return luigi.LocalTarget(out_fname)
def run(self):
with self.output().open('w') as outf:
outf.write('Completed')
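# Illustrative invocation (assumed, not taken from the repository docs): after
# the query/cells/tiles pickle files have been generated, each node runs e.g.
#
#   python classifier_workflow.py --tile 0
#
# which builds RunCombineCellStatsTasks for the cells listed in tiles[0].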
if __name__ == '__main__':
# Setup command-line arguments
desc = "Processes zonal stats for a given set of cells."
hlp = ("The tile/chunk index to retieve from the tiles list. "
"(Needs to have been previously computed to a file named tiles.pkl")
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--tile', type=int, help=hlp)
parsed_args = parser.parse_args()
tile_idx = parsed_args.tile
# setup logging
log_dir = CONFIG.get('work', 'logs_directory')
if not exists(log_dir):
os.makedirs(log_dir)
logfile = "{log_path}/stats_workflow_{uname}_{pid}.log"
logfile = logfile.format(log_path=log_dir, uname=os.uname()[1],
pid=os.getpid())
logging_level = logging.INFO
logging.basicConfig(filename=logfile, level=logging_level,
format=("%(asctime)s: [%(name)s] (%(levelname)s) "
"%(message)s "))
    # Get the list of tiles (groups of cells that each node will operate on)
tiles_list_fname = pjoin(CONFIG.get('work', 'output_directory'),
CONFIG.get('outputs', 'tiles_list'))
with open(tiles_list_fname, 'r') as in_file:
tiles = pickle.load(in_file)
# Initialise the job
tile = tiles[tile_idx]
tasks = [RunCombineCellStatsTasks(tile[0], tile[1])]
luigi.build(tasks, local_scheduler=True, workers=16)
luigi.run()
| apache-2.0 |
AlexanderFabisch/scikit-learn | sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
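# Illustrative sketch (not part of the original module): for a feature with
# fewer unique values than ``grid_resolution`` the unique values themselves
# become the axis, so e.g.
#
#   X = np.array([[0., 10.], [1., 20.], [2., 30.]])
#   grid, axes = _grid_from_X(X, grid_resolution=100)
#
# gives axes [0, 1, 2] and [10, 20, 30] and a 9 x 2 cartesian grid.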
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependence should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependence should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
# dont return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
bobquest33/peach | tutorial/fuzzy-logic/defuzzification.py | 6 | 3885 | ################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/defuzzification.py
# Defuzzification methods
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
import numpy
from peach.fuzzy import *
# The main application of fuzzy logic is in the form of fuzzy controllers. The
# last step of the control is the defuzzification, or returning from fuzzy sets
# to crisp numbers. Peach has a number of ways of dealing with that operation.
# Here we see how to do that.
# Just to illustrate the method, we will create arbitrary fuzzy sets. In a
# controller, these functions would be obtained by fuzzification and a set of
# production rules. But our intent here is to show how to use the
# defuzzification methods. Remember that instantiating Membership functions
# gives us a function, so we must apply it over our domain.
y = numpy.linspace(-30.0, 30.0, 500)
gn = Triangle(-30.0, -20.0, -10.0)(y)
pn = Triangle(-20.0, -10.0, 0.0)(y)
z = Triangle(-10.0, 0.0, 10.0)(y)
pp = Triangle(0.0, 10.0, 20.0)(y)
gp = Triangle(10.0, 20.0, 30.0)(y)
# Here we simulate the response of the production rules of a controller. In it,
# a controller will associate a membership value with every membership function
# of the output variable. Here we do that. You will notice that no membership
# values are associated with pp and gp functions. That is because we are
# supposing that they are 0, effectivelly eliminating those functions (we plot
# them anyway.
mf = gn & 0.33 | pn & 0.67 | z & 0.25
# Here are the defuzzification methods. Defuzzification methods are functions.
# They receive, as their first parameter, the membership function (or the fuzzy
# set) and as second parameter the domain of the output variable. Every method
# works that way -- and if you want to implement your own, use this signature.
# Notice that it is a simple function, not a class that is instantiated.
centroid = Centroid(mf, y) # Centroid method
bisec = Bisector(mf, y)            # Bisection method
som = SmallestOfMaxima(mf, y) # Smallest of Maxima
lom = LargestOfMaxima(mf, y) # Largest of Maxima
mom = MeanOfMaxima(mf, y) # Mean of Maxima
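# As a sketch of that signature (this helper is not part of the original
# tutorial), a hand-written centroid rule is just a membership-weighted
# average of the domain:
def my_centroid(mf, y):
    """Naive centroid defuzzification: sum(mf*y) / sum(mf)."""
    return numpy.sum(mf * y) / numpy.sum(mf)
# my_centroid(mf, y) should agree closely with the Centroid result above
# (small differences are possible if Peach integrates differently).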
# We will use the matplotlib module to plot these functions. We save the plot in
# a figure called 'defuzzification.png'.
try:
from matplotlib import *
from matplotlib.pylab import *
figure(1).set_size_inches(8., 4.)
a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
ll = [ 0.0, 1.0 ]
a1.hold(True)
a1.plot([ centroid, centroid ], ll, linewidth = 1)
a1.plot([ bisec, bisec ], ll, linewidth = 1)
a1.plot([ som, som ], ll, linewidth = 1)
a1.plot([ lom, lom ], ll, linewidth = 1)
a1.plot([ mom, mom ], ll, linewidth = 1)
a1.plot(y, gn, 'k--')
a1.plot(y, pn, 'k--')
a1.plot(y, z, 'k--')
a1.plot(y, pp, 'k--')
a1.plot(y, gp, 'k--')
a1.fill(y, mf, 'gray')
a1.set_xlim([ -30, 30 ])
a1.set_ylim([ -0.1, 1.1 ])
a1.set_xticks(linspace(-30, 30, 7.0))
a1.set_yticks([ 0.0, 1.0 ])
a1.legend([ 'Centroid = %7.4f' % centroid,
'Bisector = %7.4f' % bisec,
'SOM = %7.4f' % som,
'LOM = %7.4f' % lom,
'MOM = %7.4f' % mom ])
savefig("defuzzification.png")
except ImportError:
print "Defuzzification results:"
print " Centroid = %7.4f" % centroid
print " Bisector = %7.4f" % bisec
print " SOM = %7.4f" % som
print " LOM = %7.4f" % lom
print " MOM = %7.4f" % mom | lgpl-2.1 |
roxyboy/scikit-learn | sklearn/decomposition/pca.py | 192 | 23117 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
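# Editorial note (not part of the original module): these two helpers are how
# ``n_components='mle'`` is resolved in ``PCA._fit`` below, e.g.
#
#   spectrum = (S ** 2) / n_samples        # explained variances from the SVD
#   rank = _infer_dimension_(spectrum, n_samples, n_features)
#
# i.e. the candidate rank with the highest Minka log-likelihood is kept.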
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
| bsd-3-clause |
jesse-norris/ml | nn/displayex3.py | 1 | 2357 | from matplotlib.pyplot import figure, show
from matplotlib.cm import binary
from numpy import zeros, linspace, arange, flip
from numpy.random import random_integers
### Module built to display '/data/ex3data1.csv'
def displayData(X):
""" Selects and displays a random sample of 100 of the hand-written images. """
fig = figure(figsize=(6,6))
ax = fig.add_subplot(111)
images = zeros((10*20, 10*20)) # Empty 200x200 array for 10x10 images
# Randomly select 100 integers between 0 and 4999 (inclusive)
indicies = random_integers(low=0, high=4999, size=100)
    # This for-loop will step through the randomly selected indices
    # Placing the selected 20x20 image into the larger 200x200 'images' array
    # The index will be placed in order of (0,0), (0,1), ... (1,0), (1,1), ... (9,8), (9,9)
    # Taking into account that each image is itself a 20x20 array
    # *** Fundamentally this loop puts many smaller matrices into a larger matrix ***
for i, index in enumerate(indicies):
row = [int(int(i/10)*20), int(int(i/10+1)*20)]
col = [int((i%10)*20), int((i%10+1)*20)]
images[row[0]:row[1],col[0]:col[1]] = X[index].reshape(20,20)
# ### Plot text corresponding to the image index along with the image (looks too cluttered)
# plt.text(int(row[0]+1), int(col[0]+19), "{}".format(index+1), fontsize=5,
# bbox={"boxstyle":"Round, pad=0.05", "facecolor":"white", "edgecolor":"black", "lw":0.5)
# Plot a grid to highlight each image individually
for line in linspace(0, 200, 11):
ax.axhline(line, color="k")
ax.axvline(line, color="k")
# Adjust the plot elements to better reflect the data
ax.set_xticks(arange(10, 210, 20))
ax.set_xticklabels(arange(1, 11, 1))
ax.set_yticks(arange(10, 210, 20))
ax.set_yticklabels(flip(arange(1, 11, 1), axis=0))
ax.set_title("Random Sample of 100 Hand-Written Numbers")
# Plot and show the result
ax.imshow(images.T, cmap=binary)
show()
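# Editorial sketch (not part of the original script): the index arithmetic in
# displayData maps image i to grid cell (i // 10, i % 10); for i = 23 that is
# rows 40:60 and columns 60:80 of the 200x200 canvas.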
def displayImage(X, index):
""" Specify a single image 'index' to display. """
fig = figure(figsize=(6,6))
ax = fig.add_subplot(111)
# Adjust the plot elements to better reflect the data
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.set_title("Image #{}".format(index))
# Plot and show the result
ax.imshow(X[index].reshape(20,20).T, cmap=binary)
show() | gpl-3.0 |
samzhang111/scikit-learn | sklearn/preprocessing/data.py | 5 | 66593 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# Giorgio Patrini <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils import deprecated
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
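# Illustrative sketch (not part of the original module): zero scales are
# replaced by one so that constant features pass through unchanged instead of
# producing divide-by-zero NaNs, e.g.
#
#   >>> _handle_zeros_in_scale(np.array([2., 0., 5.]))
#   array([ 2.,  1.,  5.])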
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
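# Minimal usage sketch (hypothetical data, not from the original module):
#
#   >>> X = np.array([[1., -1.], [2., 0.], [3., 1.]])
#   >>> scale(X)            # zero mean, unit variance per column
#   array([[-1.22474487, -1.22474487],
#          [ 0.        ,  0.        ],
#          [ 1.22474487,  1.22474487]])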
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
data_range_ : ndarray, shape (n_features,)
Per feature range (data_max_ - data_min_) seen in the data
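    Examples
    --------
    A minimal sketch with made-up data (illustrative only, not part of the
    original documentation); each column is mapped onto [0, 1]:
    >>> import numpy as np
    >>> from sklearn.preprocessing import MinMaxScaler
    >>> X = np.array([[1., -1.,  2.],
    ...               [2.,  0.,  0.],
    ...               [0.,  1., -1.]])
    >>> scaler = MinMaxScaler()
    >>> X_scaled = scaler.fit_transform(X)
    >>> np.allclose(X_scaled.min(axis=0), 0.)
    True
    >>> np.allclose(X_scaled.max(axis=0), 1.)
    True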
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
@property
@deprecated("Attribute data_range will be removed in "
"0.19. Use data_range_ instead")
def data_range(self):
return self.data_range_
@property
@deprecated("Attribute data_min will be removed in "
"0.19. Use data_min_ instead")
def data_min(self):
return self.data_min_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
        X : array-like, shape [n_samples, n_features]
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does no support sparse input. "
"You may consider to use MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X -= self.min_
X /= self.scale_
return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
    X : array-like, shape (n_samples, n_features)
        The data to scale.
    feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
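    Examples
    --------
    A minimal sketch (illustrative values, not part of the original
    documentation); the 1D input is rescaled onto [0, 1]:
    >>> import numpy as np
    >>> from sklearn.preprocessing import minmax_scale
    >>> np.allclose(minmax_scale(np.array([1., 2., 3., 4.])),
    ...             [0., 1. / 3, 2. / 3, 1.])
    True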
"""
# To allow retro-compatibility, we handle here the case of 1D-input
# From 0.17, 1D-input are deprecated in scaler objects
# Although, we want to allow the users to keep calling this function
# with 1D-input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
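    Examples
    --------
    A minimal sketch with made-up data (illustrative only, not part of the
    original documentation):
    >>> import numpy as np
    >>> from sklearn.preprocessing import StandardScaler
    >>> X = np.array([[1., -1.,  2.],
    ...               [2.,  0.,  0.],
    ...               [0.,  1., -1.]])
    >>> scaler = StandardScaler().fit(X)
    >>> np.allclose(scaler.mean_, [1., 0., 1. / 3])
    True
    >>> np.allclose(scaler.transform(X).std(axis=0), 1.)
    True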
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
@property
@deprecated("Attribute std_ will be removed in 0.19. Use scale_ instead")
def std_(self):
return self.scale_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
    This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
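    Examples
    --------
    A minimal sketch with made-up data (illustrative only, not part of the
    original documentation); each column is divided by its maximum
    absolute value:
    >>> import numpy as np
    >>> from sklearn.preprocessing import MaxAbsScaler
    >>> X = np.array([[1., -1.,  2.],
    ...               [2.,  0.,  0.],
    ...               [0.,  1., -1.]])
    >>> scaler = MaxAbsScaler().fit(X)
    >>> np.allclose(scaler.scale_, [2., 1., 2.])
    True
    >>> np.allclose(np.abs(scaler.transform(X)).max(axis=0), 1.)
    True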
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the per-feature maximum absolute value
            used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        The data to scale.
    axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
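    Examples
    --------
    A minimal sketch (illustrative values, not part of the original
    documentation):
    >>> import numpy as np
    >>> from sklearn.preprocessing import maxabs_scale
    >>> np.allclose(maxabs_scale(np.array([[2., -1.], [-4., 0.5]])),
    ...             [[0.5, -1.], [-1., 0.5]])
    True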
"""
# To allow retro-compatibility, we handle here the case of 1D-input
# From 0.17, 1D-input are deprecated in scaler objects
# Although, we want to allow the users to keep calling this function
# with 1D-input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
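    Examples
    --------
    A minimal sketch with made-up data containing an outlier (illustrative
    only, not part of the original documentation); the fitted center is the
    per-column median:
    >>> import numpy as np
    >>> from sklearn.preprocessing import RobustScaler
    >>> X = np.array([[1., -2.], [2., 0.], [3., 1.], [4., 100.]])
    >>> scaler = RobustScaler().fit(X)
    >>> np.allclose(scaler.center_, [2.5, 0.5])
    True
    >>> X_scaled = scaler.transform(X)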
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
                inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
                inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
    Center to the median and scale component-wise
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to the interquartile range.
copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
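    Examples
    --------
    A minimal sketch (illustrative values, not part of the original
    documentation); the single column has median 3 and interquartile
    range 2:
    >>> import numpy as np
    >>> from sklearn.preprocessing import robust_scale
    >>> X = np.array([[1.], [2.], [3.], [4.], [5.]])
    >>> np.allclose(robust_scale(X).ravel(), [-1., -0.5, 0., 0.5, 1.])
    True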
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
        return np.vstack([np.bincount(c, minlength=self.n_input_features_)
                          for c in combinations])
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
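    Examples
    --------
    A minimal sketch (illustrative values, not part of the original
    documentation); each row is rescaled to unit l2 norm:
    >>> import numpy as np
    >>> from sklearn.preprocessing import normalize
    >>> X = np.array([[4., 3.], [1., 0.]])
    >>> np.allclose(normalize(X, norm='l2'), [[0.8, 0.6], [1., 0.]])
    True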
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
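    Examples
    --------
    A minimal sketch with made-up data (illustrative only, not part of the
    original documentation); each row is rescaled to unit l1 norm:
    >>> import numpy as np
    >>> from sklearn.preprocessing import Normalizer
    >>> X = np.array([[1., 3.], [2., 2.]])
    >>> np.allclose(Normalizer(norm='l1').fit(X).transform(X),
    ...             [[0.25, 0.75], [0.5, 0.5]])
    True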
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
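    Examples
    --------
    A minimal sketch (illustrative values, not part of the original
    documentation); entries strictly above the threshold become 1, the
    rest become 0:
    >>> import numpy as np
    >>> from sklearn.preprocessing import binarize
    >>> X = np.array([[1., -1., 2.], [0., 0.5, -0.2]])
    >>> np.allclose(binarize(X, threshold=0.4), [[1., 0., 1.], [0., 1., 0.]])
    True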
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
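    Examples
    --------
    A minimal sketch with made-up data (illustrative only, not part of the
    original documentation); with the default threshold of 0, only strictly
    positive values map to 1:
    >>> import numpy as np
    >>> from sklearn.preprocessing import Binarizer
    >>> X = np.array([[1., -1., 0.], [0.5, 0., -2.]])
    >>> np.allclose(Binarizer().fit(X).transform(X),
    ...             [[1., 0., 0.], [1., 0., 0.]])
    True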
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
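    Examples
    --------
    A minimal sketch with made-up data (illustrative only, not part of the
    original documentation); for a linear kernel, centering the kernel
    matrix matches centering the data first and then computing the kernel:
    >>> import numpy as np
    >>> from sklearn.preprocessing import KernelCenterer
    >>> X = np.array([[1., 2.], [3., 4.], [5., 0.]])
    >>> K = np.dot(X, X.T)
    >>> K_centered = KernelCenterer().fit(K).transform(K)
    >>> X_centered = X - X.mean(axis=0)
    >>> np.allclose(K_centered, np.dot(X_centered, X_centered.T))
    True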
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known using fit.
        # i.e. those smaller than n_values_, using a mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
        X_out : sparse matrix if sparse=True else a 2-d array
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause |
ltiao/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the last 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
eshaw2/SoftwareSystems | HFC_ch2.5/wget-1.15/src/timing.py | 24 | 1661 | """Example code for Software Systems at Olin College.
Copyright 2014 Allen Downey
License: Creative Commons Attribution-ShareAlike 3.0
"""
import glob
import sys
import matplotlib.pyplot as pyplot
def read_file(filename):
"""Reads a timing file and returns a list of (t, s) pairs.
filename: string timing file generated by the instrumented version
of wget
t is a time in ms
s is the total number of bytes presented to the application layer
"""
res = []
# add a fake packet at the beginning so the connect time is visible
lasts = 1460
for line in open(filename):
t, s = [float(x) for x in line.split()]
res.append((t, lasts))
res.append((t, s))
lasts = s
return res
def make_graph(dirname):
"""Makes a graph of the timing charts in the given directory.
Graphs all files in the directory that match the pattern
timing.[0-9]*.[0-9]*
dirname: string
"""
pattern = '%s/timing.[0-9]*.[0-9]*' % dirname
filenames = glob.glob(pattern)
data = []
for filename in filenames:
pairs = read_file(filename)
data.append(pairs)
for pairs in data:
xs, ys = zip(*pairs)
pyplot.plot(xs, ys, alpha=0.4, linewidth=1)
pyplot.xlabel('time (ms)')
pyplot.ylabel('bytes received')
if dirname == '.':
filename = 'timing.png'
else:
filename = 'timing.%s.png' % dirname
print 'Writing', filename
pyplot.savefig(filename, format='png', dpi=150)
pyplot.show()
def main(script, dirname='.'):
make_graph(dirname)
if __name__ == '__main__':
main(*sys.argv)
| gpl-3.0 |
shusenl/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
walterreade/scikit-learn | examples/classification/plot_lda.py | 142 | 2419 | """
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="Linear Discriminant Analysis with shrinkage", color='navy')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="Linear Discriminant Analysis", color='gold')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('Linear Discriminant Analysis vs. \
shrinkage Linear Discriminant Analysis (1 discriminative feature)')
plt.show()
| bsd-3-clause |
icdishb/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 41 | 7742 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
import urllib
print("Downloading data from '%s', please wait..." % url)
opener = urllib.urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
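# Illustrative note (comment added): the closure loop above collapses redirect
# chains, so a hypothetical map {'A': 'B', 'B': 'C'} becomes
# {'A': 'C', 'B': 'C'}; the `seen` set breaks redirect cycles so the inner
# while loop always terminates.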
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in index_map.iteritems())
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest components of the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
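# Minimal sketch (added for illustration, not part of the original example and
# never called below): plain power iteration on a tiny dense matrix, showing
# the core idea behind centrality_scores() without the PageRank damping and
# dangling-node corrections.
def _toy_power_iteration(A, n_iter=50):
    """Approximate the principal eigenvector of a small dense matrix A."""
    v = np.ones(A.shape[0]) / A.shape[0]
    for _ in range(n_iter):
        v = A.dot(v)
        v /= np.linalg.norm(v)  # renormalize each step to keep values bounded
    return v
# e.g. _toy_power_iteration(np.array([[0., 1., 1.],
#                                     [1., 0., 0.],
#                                     [1., 0., 0.]]))
# puts the largest weight on the hub vertex 0.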
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
eoinmurray/icarus | Experiments/utils/plot_fidelity_curves.py | 1 | 3909 |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import Grid
def normalize(arr):
x = arr[:,0]
y = arr[:,1]
hold_max = []
hold_int = []
pulse_width = 25
for j in xrange(int(x.max()/pulse_width)):
minIdx = np.abs(x - pulse_width*j).argmin()
maxIdx = np.abs(x - pulse_width*(j+1)).argmin()
peakX = x[minIdx: maxIdx]
peakY = y[minIdx:maxIdx]
if j != 6:
hold_max.append( np.max(peakY) )
hold_int.append( np.sum(peakY) )
else:
delay_peak = np.sum(peakY)
y = y/np.mean(hold_max)
return np.array(np.vstack((x,y)).T), calculate_g2(delay_peak, hold_int)
def calculate_g2(delay_peak, hold_int):
if np.array(hold_int).mean() > 0:
g2 = delay_peak/np.array(hold_int).mean()
g2 = np.around(g2, decimals=3)
return g2
else:
return 0
def plot_corrs(arr11, arr12, arr21, arr22, arr31, arr32, title, do_normalize = True):
delay = 180 - 20
x_min = - 100
x_max = 100
fidelity = 0.
if do_normalize:
arr11, g2_11 = normalize(arr11)
arr12, g2_12 = normalize(arr12)
arr21, g2_21 = normalize(arr21)
arr22, g2_22 = normalize(arr22)
arr31, g2_31 = normalize(arr31)
arr32, g2_32 = normalize(arr32)
grect = (g2_11 - g2_12) / (g2_11 + g2_12)
gdiag = (g2_21 - g2_22) / (g2_21 + g2_22)
gcirc = (g2_31 - g2_32) / (g2_31 + g2_32)
fidelity = (1 + grect + gdiag - gcirc)/4
fig = plt.figure(1, (12, 5))
fig.subplots_adjust(left=0.05, right=0.98)
fig.suptitle('Fidelity: ' + np.array(fidelity).astype('|S5').tostring() + ', ' + title)
grid = Grid(fig, 111, nrows_ncols = (1, 3), axes_pad = 0.3, label_mode = 'l')
grid[0].plot(arr11[:,0] - delay, arr11[:,1], 'b-', arr12[:,0] - delay, arr12[:,1], 'r-')
grid[0].set_xlim([x_min, x_max])
grid[0].set_ylabel('Counts', fontsize = 14) ; grid[0].set_xlabel('$\\tau(ns)$', fontsize=14)
grid[0].legend(['HH', 'HV'])
grid[1].plot(arr21[:,0] - delay, arr21[:,1], 'b-', arr22[:,0] - delay, arr22[:,1], 'r-')
grid[1].set_xlim([x_min, x_max])
grid[1].set_xlabel('$\\tau(ns)$', fontsize=14)
grid[1].legend(['DA', 'DD'])
grid[2].plot(arr31[:,0] - delay, arr31[:,1], 'b-', arr32[:,0] - delay, arr32[:,1], 'r-')
grid[2].set_xlim([x_min, x_max])
grid[2].set_xlabel('$\\tau(ns)$', fontsize=14)
grid[2].legend(['LL', 'LR'])
plt.show()
def plot_by_folder(full_name = None, name = None, title = '', do_normalize = True):
import datetime
today = datetime.datetime.now().strftime("%Y-%m-%d")
if name:
plot_corrs(
np.loadtxt('out/'+today+'/'+name+'/linear D1D3.txt', delimiter=','),
np.loadtxt('out/'+today+'/'+name+'/linear D2D3.txt', delimiter=','),
np.loadtxt('out/'+today+'/'+name+'/diag D1D3.txt', delimiter=','),
np.loadtxt('out/'+today+'/'+name+'/diag D2D3.txt', delimiter=','),
np.loadtxt('out/'+today+'/'+name+'/circ D1D3.txt', delimiter=','),
np.loadtxt('out/'+today+'/'+name+'/circ D2D3.txt', delimiter=','),
title,
do_normalize
)
if full_name:
plot_corrs(
np.loadtxt(full_name + '/linear D1D3.txt', delimiter=','),
np.loadtxt(full_name + '/linear D2D3.txt', delimiter=','),
np.loadtxt(full_name + '/diag D1D3.txt', delimiter=','),
np.loadtxt(full_name + '/diag D2D3.txt', delimiter=','),
np.loadtxt(full_name + '/circ D1D3.txt', delimiter=','),
np.loadtxt(full_name + '/circ D2D3.txt', delimiter=','),
title,
do_normalize
)
if __name__ == "__main__":
import sys
plot_by_folder(str(sys.argv[1])) | mit |
wavelets/BayesDataAnalysisWithPyMC | BayesDataAnalysisWithPymc/SimpleLinearRegressionPyMC.py | 2 | 3653 | # -*- coding: utf-8 -*-
'''Hierarchical Model for estimation of simple linear regression
parameters via MCMC.
Python (PyMC) adaptation of the R code from "Doing Bayesian Data Analysis",
by John K. Kruschke.
More info: http://doingbayesiandataanalysis.blogspot.com.br/
'''
from __future__ import division
import pymc
import numpy as np
from matplotlib import pyplot as plot
from plot_post import plot_post
from normalize import (normalize, convert_intercept,
convert_slope, convert_tau_sigma)
from os import path
# Code to find the data path.
scr_dir = path.dirname(__file__)
file_name = 'McIntyre1994data.csv'
comp_dir = path.join(scr_dir, 'Data', file_name)
# So, let's be lazy: the data are from McIntyre cigarette weight.
# Use numpy to load the data we want directly in the appropriate variables.
y, x = np.genfromtxt(comp_dir, delimiter=',',
skip_header=1, usecols=(1, 3), unpack=True)
# Let's try normalizing, as suggested by Kruschke.
zy = normalize(y)
zx = normalize(x)
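# Note (added): `normalize` is imported from the local normalize.py helper,
# which is not shown in this file. A plain z-score, as sketched below, is
# presumably what it does; the sketch is an assumption, not the real helper,
# and is never called by this script.
def _zscore_sketch(v):
    """Hypothetical stand-in for normalize(): centre and scale to unit SD."""
    return (v - np.mean(v)) / np.std(v)
# Standardizing both variables keeps the vague Normal priors on the slope and
# intercept reasonable regardless of the raw measurement units.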
# Define the priors for the model.
# First, normal priors for the slope and intercept.
beta0 = pymc.Normal('b0', 0.0, 1.0e-10)
beta1 = pymc.Normal('b1', 0.0, 1.0e-10)
# Then, gamma and uniform prior for precision and DoF.
# Kruschke suggests the use of a Student's t distribution for the likelihood.
# It makes the estimation more robust in the presence of outliers.
# We will use Kruschke's DoF transformation using a gain constant.
tau = pymc.Gamma('tau', 0.01, 0.01)
udf = pymc.Uniform('udf', 0.0, 1.0)
tdf_gain = 1
@pymc.deterministic
def tdf(udf=udf, tdf_gain=tdf_gain):
return 1 - tdf_gain * np.log(1 - udf)
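# Note (added): since udf ~ Uniform(0, 1), -log(1 - udf) is a standard
# exponential draw, so tdf = 1 + tdf_gain * Exponential(1). The degrees of
# freedom therefore stay >= 1 with prior mean 1 + tdf_gain; a larger gain
# pushes tdf higher, i.e. toward a more normal-like (less heavy-tailed)
# likelihood.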
# Defining the linear relationship between variables.
@pymc.deterministic
def mu(beta0=beta0, beta1=beta1, x=zx):
mu = beta0 + beta1 * x
return mu
# Finally, the likelihood using Student's t distribution.
like = pymc.NoncentralT('like', mu=mu, lam=tau, nu=tdf,
value=zy, observed=True)
# For those who want a more traditional linear model:
#like = pymc.Normal('like', mu=mu, tau=tau, value=zy, observed=True)
# The model is ready! Sampling code below.
model = pymc.Model([beta0, beta1, tau, tdf])
fit = pymc.MAP(model)
fit.fit()
mcmc = pymc.MCMC(model)
mcmc.sample(iter=100000, burn=50000, thin=10)
# Collect the sample values for the parameters.
z0_sample = mcmc.trace('b0')[:]
z1_sample = mcmc.trace('b1')[:]
ztau_sample = mcmc.trace('tau')[:]
tdf_sample = mcmc.trace('tdf')[:]
# Convert the data back to scale.
b0_sample = convert_intercept(x, y, z0_sample, z1_sample)
b1_sample = convert_slope(x, y, z1_sample)
sigma_sample = convert_tau_sigma(y, ztau_sample)
# Plot the results
plot.figure(figsize=(8.0, 8.0))
plot.subplot(221)
plot_post(b0_sample, title=r'$\beta_0$ posterior')
plot.subplot(222)
plot_post(b1_sample, title=r'$\beta_1$ posterior')
plot.subplot(223)
plot_post(sigma_sample, title=r'$\sigma$ posterior')
plot.subplot(224)
plot_post(tdf_sample, title=r'tDF posterior')
plot.subplots_adjust(wspace=0.2, hspace=0.2)
# Plot the data with some credible regression lines.
plot.figure(figsize=(8.0, 8.0))
plot.scatter(x, y, c='k', s=60)
plot.title('Data points with credible regression lines')
x1 = plot.axis()[0]
x2 = plot.axis()[1]
plot.autoscale(enable=False)
for line in range(0, len(b1_sample), len(b1_sample) // 50):
plot.plot([x1, x2], [b0_sample[line] + b1_sample[line] * x1,
b0_sample[line] + b1_sample[line] * x2],
c='#348ABD', lw=1)
plot.show()
| mit |
joernhees/scikit-learn | sklearn/neighbors/unsupervised.py | 29 | 4756 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Affects only :meth:`kneighbors` and :meth:`kneighbors_graph` methods.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> nbrs = neigh.radius_neighbors([[0, 0, 1.3]], 0.4, return_distance=False)
>>> np.asarray(nbrs[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
| bsd-3-clause |
asteca/ASteCA | packages/out/mp_bestfit_CMD.py | 1 | 9399 |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.offsetbox as offsetbox
from matplotlib.colors import LinearSegmentedColormap, LogNorm, ListedColormap
def pl_mps_phot_diag(
gs, gs_y1, gs_y2, fig, x_min_cmd, x_max_cmd, y_min_cmd, y_max_cmd,
x_ax, y_ax, v_min_mp, v_max_mp, obs_x, obs_y, obs_MPs, err_bar,
cl_sz_pt, hess_xedges, hess_yedges, x_isoch, y_isoch, phot_Nsigma,
lkl_method):
"""
Star's membership probabilities on cluster's photometric diagram.
"""
ax = plt.subplot(gs[gs_y1:gs_y2, 0:2])
# Set axis labels
plt.xlabel('$' + x_ax + '$')
plt.ylabel('$' + y_ax + '$')
# Add text box.
if gs_y1 == 0:
text = '$N_{{fit}}={}$'.format(len(obs_MPs))
ob = offsetbox.AnchoredText(text, loc=4)
ob.patch.set(boxstyle='square,pad=-0.2', alpha=0.85)
ax.add_artist(ob)
if gs_y1 == 0:
txt = r" + $\mathcal{N}(\mu,\sigma^2)$" if phot_Nsigma else ""
ax.set_title("Observed" + txt)
# Plot grid.
gls, glw, gc = plt.rcParams['grid.linestyle'],\
plt.rcParams['grid.linewidth'], plt.rcParams['grid.color']
for x_ed in hess_xedges:
# vertical lines
ax.axvline(x_ed, linestyle=gls, lw=glw, color=gc, zorder=1)
for y_ed in hess_yedges:
# horizontal lines
ax.axhline(y_ed, linestyle=gls, lw=glw, color=gc, zorder=1)
# This reversed colormap means higher prob stars will look redder.
rmap = plt.cm.get_cmap('RdYlBu_r')
# If the 'tolstoy' method was used AND the stars have a range of colors.
# The 'dolphin / tremmel' likelihoods do not use MPs in the fit, so it's
# confusing to color stars as if they did.
if (v_min_mp != v_max_mp) and lkl_method == 'tolstoy':
col_select_fit, isoch_col = obs_MPs, 'g'
plot_colorbar = True
else:
col_select_fit, isoch_col = '#519ddb', 'r'
plot_colorbar = False
# Plot stars used in the best fit process.
sca = plt.scatter(
obs_x, obs_y, marker='o', c=col_select_fit, s=cl_sz_pt, cmap=rmap,
lw=0.3, edgecolor='k', vmin=v_min_mp, vmax=v_max_mp, zorder=4)
# Plot sigma region
if phot_Nsigma:
cGreys = plt.cm.get_cmap('Greys', 100)
cmap = ListedColormap(cGreys(range(65)))
# Extend one bin upwards and to the left
ybin = abs(hess_yedges[1] - hess_yedges[0])
hess_yedges = [hess_yedges[0] - ybin] + list(hess_yedges)
xbin = abs(hess_xedges[1] - hess_xedges[0])
hess_xedges = [hess_xedges[0] - xbin] + list(hess_xedges)
plt.hist2d(*phot_Nsigma, bins=(
hess_xedges, hess_yedges), cmap=cmap, norm=LogNorm())
# Plot isochrone.
plt.plot(x_isoch, y_isoch, isoch_col, lw=1., zorder=6)
plt.xlim(x_min_cmd, x_max_cmd)
plt.ylim(y_min_cmd, y_max_cmd)
# If list is not empty, plot error bars at several values. The
# prep_plots.error_bars() is not able to handle the color-color diagram.
x_val, mag_y, xy_err = err_bar
if x_val:
xye_i = {
'0': (mag_y, 0, 1), '2': (mag_y, 0, 2),
'4': (np.linspace(min(obs_y), max(obs_y), len(x_val)), 1, 2)}
plt.errorbar(
x_val, xye_i[str(gs_y1)][0], yerr=xy_err[xye_i[str(gs_y1)][1]],
xerr=xy_err[xye_i[str(gs_y1)][2]],
fmt='k.', lw=0.8, ms=0., zorder=4)
# For plotting the colorbar (see bottom of make_D_plot file).
trans = ax.transAxes + fig.transFigure.inverted()
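# (added note) composing transAxes with the inverted figure transform maps
# axes-fraction coordinates into figure-fraction coordinates, which is what
# the colorbar placement mentioned above needs.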
return plot_colorbar, sca, trans
def pl_hess_diag(
gs, gs_y1, gs_y2, x_min_cmd, x_max_cmd, y_min_cmd, y_max_cmd, x_ax, y_ax,
lkl_method, hess_xedges, hess_yedges, hess_x, hess_y, HD):
"""
Hess diagram of observed minus best match synthetic cluster.
"""
ax = plt.subplot(gs[gs_y1:gs_y2, 2:4])
# Set plot limits
plt.xlim(x_min_cmd, x_max_cmd)
plt.ylim(y_min_cmd, y_max_cmd)
# Set axis labels
plt.xlabel('$' + x_ax + '$')
if gs_y1 == 0:
ax.set_title("Hess diagram (observed - synthetic)")
gls, glw, gc = plt.rcParams['grid.linestyle'],\
plt.rcParams['grid.linewidth'], plt.rcParams['grid.color']
for x_ed in hess_xedges:
# vertical lines
ax.axvline(x_ed, linestyle=gls, lw=glw, color=gc, zorder=1)
for y_ed in hess_yedges:
# horizontal lines
ax.axhline(y_ed, linestyle=gls, lw=glw, color=gc, zorder=1)
if HD.any():
# Add text box.
if HD.min() < 0:
plt.scatter(-100., -100., marker='s', lw=0., s=60, c='#0B02F8',
label='{}'.format(int(HD.min())))
if HD.max() > 0:
plt.scatter(-100., -100., marker='s', lw=0., s=60, c='#FB0605',
label='{}'.format(int(HD.max())))
# Define custom colorbar.
if HD.min() == 0:
cmap = LinearSegmentedColormap.from_list(
'mycmap', [(0, 'white'), (1, 'red')])
else:
# Zero point for empty bins which should be colored in white.
zero_pt = (0. - HD.min()) / float(HD.max() - HD.min())
N = 256.
zero_pt0 = np.floor(zero_pt * (N - 1)) / (N - 1)
zero_pt1 = np.ceil(zero_pt * (N - 1)) / (N - 1)
cmap = LinearSegmentedColormap.from_list(
'mycmap', [(0, 'blue'), (zero_pt0, 'white'),
(zero_pt1, 'white'), (1, 'red')], N=N)
ax.pcolormesh(hess_x, hess_y, HD, cmap=cmap, vmin=HD.min(),
vmax=HD.max(), zorder=1)
# Legend.
handles, labels = ax.get_legend_handles_labels()
leg = ax.legend(
handles, labels, loc='lower right', scatterpoints=1, ncol=2,
columnspacing=.2, handletextpad=-.3)
leg.get_frame().set_alpha(0.7)
def pl_bf_synth_cl(
gs, gs_y1, gs_y2, x_min_cmd, x_max_cmd, y_min_cmd, y_max_cmd, x_ax, y_ax,
hess_xedges, hess_yedges, x_synth, y_synth, sy_sz_pt, binar_idx, IMF_name,
R_V, best_sol, p_err, x_isoch, y_isoch, lkl_method, bin_method,
evol_track, D3_sol):
"""
Best fit synthetic cluster obtained.
"""
ax = plt.subplot(gs[gs_y1:gs_y2, 4:6])
# Set plot limits
plt.xlim(x_min_cmd, x_max_cmd)
plt.ylim(y_min_cmd, y_max_cmd)
# Set axis labels
plt.xlabel('$' + x_ax + '$')
if gs_y1 == 0:
ax.set_title("Synthetic ({} solution)".format(D3_sol))
# Add text box
text = r'$({};\,{})$'.format(lkl_method, bin_method)
ob = offsetbox.AnchoredText(text, pad=.2, loc=1)
ob.patch.set(alpha=0.85)
ax.add_artist(ob)
gls, glw, gc = plt.rcParams['grid.linestyle'],\
plt.rcParams['grid.linewidth'], plt.rcParams['grid.color']
for x_ed in hess_xedges:
# vertical lines
ax.axvline(x_ed, linestyle=gls, lw=glw, color=gc, zorder=1)
for y_ed in hess_yedges:
# horizontal lines
ax.axhline(y_ed, linestyle=gls, lw=glw, color=gc, zorder=1)
# Single systems
plt.scatter(
x_synth[~binar_idx], y_synth[~binar_idx], marker='o', s=sy_sz_pt,
c='#519ddb', lw=0.3, edgecolor='k', zorder=2)
# Binary systems
plt.scatter(
x_synth[binar_idx], y_synth[binar_idx], marker='o', s=sy_sz_pt,
c='#F34C4C', lw=0.3, edgecolor='k', zorder=3)
# Plot isochrone.
plt.plot(x_isoch, y_isoch, '#21B001', lw=1., zorder=6)
if gs_y1 == 0:
# Add text box
text1 = '$N_{{synth}} = {}$'.format(len(x_synth))
ob = offsetbox.AnchoredText(text1, pad=.2, loc=3)
ob.patch.set(alpha=0.85)
ax.add_artist(ob)
# Add text box to the right of the synthetic cluster.
ax_t = plt.subplot(gs[gs_y1:gs_y2, 6:7])
ax_t.axis('off') # Remove axis from frame.
t1 = r'$Synthetic\;cluster\;parameters$' + '\n[Tracks: {}]'.format(
evol_track)
t2 = r'$IMF \hspace{{3.}}:\;{}$'.format(
IMF_name.replace('_', r'\;').title())
t3 = r'$R_{{V}} \hspace{{3.2}}=\;{}$'.format(R_V)
t4 = r'$z \hspace{{3.9}}=\;{:.5f}\pm {:.5f}$'.format(
best_sol[0], p_err[0][2])
t5 = r'$\log(age) \hspace{{0.17}}=\;{:.3f}\pm {:.3f}$'.format(
best_sol[1], p_err[1][2])
t6 = r'$E_{{(B-V)}} \hspace{{1.35}}=\;{:.3f}\pm {:.3f}$'.format(
best_sol[2], p_err[2][2])
t7 = r'$(m-M)_{{0}}=\;{:.3f} \pm {:.3f}$'.format(
best_sol[3], p_err[3][2])
t8 = r'$M\,(M_{{\odot}}) \hspace{{1.07}} =\;{:.0f}\pm {:.0f}$'.format(
best_sol[4], p_err[4][2])
t9 = r'$b_{{frac}} \hspace{{2.37}}=\;{:.2f}\pm {:.2f}$'.format(
best_sol[5], p_err[5][2])
text = t1 + '\n\n' + t2 + '\n' + t3 + '\n' + t4 + '\n' + t5 + '\n' +\
t6 + '\n' + t7 + '\n' + t8 + '\n' + t9
ob = offsetbox.AnchoredText(text, pad=1, loc=6, borderpad=-5)
ob.patch.set(alpha=0.85)
ax_t.add_artist(ob)
def plot(N, *args):
"""
Handle each plot separately.
"""
plt_map = {
0: [pl_hess_diag, 'Hess diagram'],
1: [pl_bf_synth_cl, 'synthetic cluster']
}
fxn = plt_map.get(N, [None, None])[0]
if fxn is None:
raise ValueError(" ERROR: there is no plot {}.".format(N))
try:
fxn(*args)
except Exception:
import traceback
print(traceback.format_exc())
print(" WARNING: error when plotting {}".format(plt_map.get(N)[1]))
| gpl-3.0 |
tridge/MAVProxy | MAVProxy/mavproxy.py | 3 | 55580 | #!/usr/bin/env python
'''
mavproxy - a MAVLink proxy program
Copyright Andrew Tridgell 2011
Released under the GNU GPL version 3 or later
'''
import sys, os, time, socket, signal
import fnmatch, errno, threading
import serial
import traceback
import select
import shlex
import math
import platform
import json
import struct
try:
reload
except NameError:
try:
from importlib import reload
except ImportError:
from imp import reload
try:
import queue as Queue
except ImportError:
import Queue
from builtins import input
from MAVProxy.modules.lib import textconsole
from MAVProxy.modules.lib import mp_util
from MAVProxy.modules.lib import rline
from MAVProxy.modules.lib import mp_module
from MAVProxy.modules.lib import dumpstacks
from MAVProxy.modules.lib import mp_substitute
from MAVProxy.modules.lib import multiproc
from MAVProxy.modules.mavproxy_link import preferred_ports
# adding all this allows pyinstaller to build a working windows executable
# note that using --hidden-import does not work for these modules
try:
multiproc.freeze_support()
from pymavlink import mavwp, mavutil
import matplotlib, HTMLParser
except Exception:
pass
# screensaver dbus syntax swiped from
# https://stackoverflow.com/questions/10885337/inhibit-screensaver-with-python
screensaver_interface = None
screensaver_cookie = None
try:
import atexit
import dbus
bus = dbus.SessionBus()
saver = bus.get_object('org.freedesktop.ScreenSaver', '/ScreenSaver')
screensaver_interface = dbus.Interface(saver, dbus_interface='org.freedesktop.ScreenSaver')
if screensaver_cookie is not None:
atexit.register(screensaver_interface.UnInhibit, [screensaver_cookie])
except Exception as e:
pass
if __name__ == '__main__':
multiproc.freeze_support()
#The MAVLink version being used (None, "1.0", "2.0")
mavversion = None
class MPStatus(object):
'''hold status information about the mavproxy'''
def __init__(self):
self.gps = None
self.msgs = {}
self.msg_count = {}
self.counters = {'MasterIn' : [], 'MasterOut' : 0, 'FGearIn' : 0, 'FGearOut' : 0, 'Slave' : 0}
self.bytecounters = {'MasterIn': []}
self.setup_mode = opts.setup
self.mav_error = 0
self.altitude = 0
self.last_distance_announce = 0.0
self.exit = False
self.flightmode = 'MAV'
self.last_mode_announce = 0
self.last_mode_announced = 'MAV'
self.logdir = None
self.last_heartbeat = 0
self.last_message = 0
self.heartbeat_error = False
self.last_apm_msg = None
self.last_apm_msg_time = 0
self.statustexts_by_sysidcompid = {}
self.highest_msec = {}
self.have_gps_lock = False
self.lost_gps_lock = False
self.last_gps_lock = 0
self.watch = None
self.last_streamrate1 = -1
self.last_streamrate2 = -1
self.last_seq = 0
self.armed = False
self.last_bytecounter_calc = 0
class ByteCounter(object):
def __init__(self):
self.total_count = 0
self.current_count = 0
self.buckets = []
self.max_buckets = 10 # 10 seconds
def update(self, bytecount):
self.total_count += bytecount
self.current_count += bytecount
def rotate(self):
'''move current count into a bucket, zero count'''
# huge assumption made that we're called rapidly enough to
# not need to rotate multiple buckets.
self.buckets.append(self.current_count)
self.current_count = 0
if len(self.buckets) > self.max_buckets:
self.buckets = self.buckets[-self.max_buckets:]
def rate(self):
if len(self.buckets) == 0:
return 0
total = 0
for bucket in self.buckets:
total += bucket
return total/float(len(self.buckets))
def total(self):
return self.total_count
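# Note (added): rate() is therefore an average of bytes per second over up
# to max_buckets one-second buckets, because update_bytecounters() below
# rotates the buckets roughly once per second.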
def update_bytecounters(self):
'''rotate bytecounter buckets if required'''
now = time.time()
time_delta = now - self.last_bytecounter_calc
if time_delta < 1:
return
self.last_bytecounter_calc = now
for counter in self.bytecounters['MasterIn']:
counter.rotate()
def show(self, f, pattern=None, verbose=False):
'''write status to status.txt'''
if pattern is None:
f.write('Counters: ')
for c in self.counters:
f.write('%s:%s ' % (c, self.counters[c]))
f.write('\n')
f.write('MAV Errors: %u\n' % self.mav_error)
f.write(str(self.gps)+'\n')
for m in sorted(self.msgs.keys()):
if pattern is not None:
if not fnmatch.fnmatch(str(m).upper(), pattern.upper()):
continue
if getattr(self.msgs[m], '_instance_field', None) is not None and m.find('[') == -1 and pattern.find('*') != -1:
# only show instance versions for patterns
continue
msg = None
sysid = mpstate.settings.target_system
for mav in mpstate.mav_master:
if not sysid in mav.sysid_state:
continue
if not m in mav.sysid_state[sysid].messages:
continue
msg2 = mav.sysid_state[sysid].messages[m]
if msg is None or msg2._timestamp > msg._timestamp:
msg = msg2
if msg is None:
continue
if verbose:
try:
mavutil.dump_message_verbose(f, msg)
f.write("\n")
except AttributeError as e:
if "has no attribute 'dump_message_verbose'" in str(e):
print("pymavlink update required for --verbose")
else:
raise e
else:
f.write("%u: %s\n" % (self.msg_count[m], str(msg)))
def write(self):
'''write status to status.txt'''
f = open('status.txt', mode='w')
self.show(f)
f.close()
def say_text(text, priority='important'):
'''text output - default function for say()'''
mpstate.console.writeln(text)
def say(text, priority='important'):
'''text and/or speech output'''
mpstate.functions.say(text, priority)
def add_input(cmd, immediate=False):
'''add some command input to be processed'''
if immediate:
process_stdin(cmd)
else:
mpstate.input_queue.put(cmd)
class MAVFunctions(object):
'''core functions available in modules'''
def __init__(self):
self.process_stdin = add_input
self.param_set = param_set
self.get_mav_param = get_mav_param
self.say = say_text
# input handler can be overridden by a module
self.input_handler = None
class MPState(object):
'''holds state of mavproxy'''
def __init__(self):
self.console = textconsole.SimpleConsole()
self.map = None
self.map_functions = {}
self.click_location = None
self.click_time = None
self.vehicle_type = None
self.vehicle_name = None
from MAVProxy.modules.lib.mp_settings import MPSettings, MPSetting
self.settings = MPSettings(
[ MPSetting('link', int, 1, 'Primary Link', tab='Link', range=(0,4), increment=1),
MPSetting('streamrate', int, 4, 'Stream rate link1', range=(-1,500), increment=1),
MPSetting('streamrate2', int, 4, 'Stream rate link2', range=(-1,500), increment=1),
MPSetting('heartbeat', float, 1, 'Heartbeat rate (Hz)', range=(0,100), increment=0.1),
MPSetting('mavfwd', bool, True, 'Allow forwarded control'),
MPSetting('mavfwd_rate', bool, False, 'Allow forwarded rate control'),
MPSetting('shownoise', bool, True, 'Show non-MAVLink data'),
MPSetting('baudrate', int, opts.baudrate, 'baudrate for new links', range=(0,10000000), increment=1),
MPSetting('rtscts', bool, opts.rtscts, 'enable flow control'),
MPSetting('select_timeout', float, 0.01, 'select timeout'),
MPSetting('altreadout', int, 10, 'Altitude Readout',
range=(0,100), increment=1, tab='Announcements'),
MPSetting('distreadout', int, 200, 'Distance Readout', range=(0,10000), increment=1),
MPSetting('moddebug', int, opts.moddebug, 'Module Debug Level', range=(0,3), increment=1, tab='Debug'),
MPSetting('script_fatal', bool, False, 'fatal error on bad script', tab='Debug'),
MPSetting('compdebug', int, 0, 'Computation Debug Mask', range=(0,3), tab='Debug'),
MPSetting('flushlogs', bool, False, 'Flush logs on every packet'),
MPSetting('requireexit', bool, False, 'Require exit command'),
MPSetting('wpupdates', bool, True, 'Announce waypoint updates'),
MPSetting('wpterrainadjust', bool, True, 'Adjust alt of moved wp using terrain'),
MPSetting('wp_use_mission_int', bool, True, 'use MISSION_ITEM_INT messages'),
MPSetting('basealt', int, 0, 'Base Altitude', range=(0,30000), increment=1, tab='Altitude'),
MPSetting('wpalt', int, 100, 'Default WP Altitude', range=(0,10000), increment=1),
MPSetting('rallyalt', int, 90, 'Default Rally Altitude', range=(0,10000), increment=1),
MPSetting('terrainalt', str, 'Auto', 'Use terrain altitudes', choice=['Auto','True','False']),
MPSetting('guidedalt', int, 100, 'Default "Fly To" Altitude', range=(0,10000), increment=1),
MPSetting('rally_breakalt', int, 40, 'Default Rally Break Altitude', range=(0,10000), increment=1),
MPSetting('rally_flags', int, 0, 'Default Rally Flags', range=(0,10000), increment=1),
MPSetting('source_system', int, 255, 'MAVLink Source system', range=(0,255), increment=1, tab='MAVLink'),
MPSetting('source_component', int, 230, 'MAVLink Source component', range=(0,255), increment=1),
MPSetting('target_system', int, 0, 'MAVLink target system', range=(0,255), increment=1),
MPSetting('target_component', int, 0, 'MAVLink target component', range=(0,255), increment=1),
MPSetting('state_basedir', str, None, 'base directory for logs and aircraft directories'),
MPSetting('allow_unsigned', bool, True, 'whether unsigned packets will be accepted'),
MPSetting('dist_unit', str, 'm', 'distance unit', choice=['m', 'nm', 'miles'], tab='Units'),
MPSetting('height_unit', str, 'm', 'height unit', choice=['m', 'feet']),
MPSetting('speed_unit', str, 'm/s', 'speed unit', choice=['m/s', 'knots', 'mph']),
MPSetting('fwdpos', bool, False, 'Forward GLOBAL_POSITION_INT on all links'),
MPSetting('checkdelay', bool, True, 'check for link delay'),
MPSetting('param_ftp', bool, True, 'try ftp for parameter download'),
MPSetting('vehicle_name', str, '', 'Vehicle Name', tab='Vehicle'),
MPSetting('sys_status_error_warn_interval', int, 30, 'interval to warn of autopilot software failure'),
MPSetting('inhibit_screensaver_when_armed', bool, False, 'inhibit screensaver while vehicle armed'),
])
self.completions = {
"script" : ["(FILENAME)"],
"set" : ["(SETTING)"],
"status" : ["(VARIABLE)"],
"module" : ["list",
"load (AVAILMODULES)",
"<unload|reload> (LOADEDMODULES)"]
}
self.status = MPStatus()
# master mavlink device
self.mav_master = None
# mavlink outputs
self.mav_outputs = []
self.sysid_outputs = {}
# SITL output
self.sitl_output = None
self.mav_param_by_sysid = {}
self.mav_param_by_sysid[(self.settings.target_system,self.settings.target_component)] = mavparm.MAVParmDict()
self.modules = []
self.public_modules = {}
self.functions = MAVFunctions()
self.select_extra = {}
self.continue_mode = False
self.aliases = {}
import platform
self.system = platform.system()
self.multi_instance = {}
self.instance_count = {}
self.is_sitl = False
self.start_time_s = time.time()
self.attitude_time_s = 0
@property
def mav_param(self):
'''map mav_param onto the current target system parameters'''
compid = self.settings.target_component
if compid == 0:
compid = 1
sysid = (self.settings.target_system, compid)
if not sysid in self.mav_param_by_sysid:
self.mav_param_by_sysid[sysid] = mavparm.MAVParmDict()
return self.mav_param_by_sysid[sysid]
def module(self, name):
'''Find a public module (most modules are private)'''
if name in self.public_modules:
return self.public_modules[name]
return None
def master(self, target_sysid = -1):
'''return the currently chosen mavlink master object'''
if len(self.mav_master) == 0:
return None
if self.settings.link > len(self.mav_master):
self.settings.link = 1
if target_sysid != -1:
# if we're looking for a specific system ID then try to find best
# link for that
best_link = None
best_timestamp = 0
for m in self.mav_master:
try:
tstamp = m.sysid_state[target_sysid].messages['HEARTBEAT']._timestamp
except Exception:
continue
if tstamp > best_timestamp:
best_link = m
best_timestamp = tstamp
if best_link is not None:
return best_link
# try to use one with no link error
if not self.mav_master[self.settings.link-1].linkerror:
return self.mav_master[self.settings.link-1]
for m in self.mav_master:
if not m.linkerror:
return m
return self.mav_master[self.settings.link-1]
def notify_click(self):
notify_mods = ['map', 'misseditor']
for modname in notify_mods:
mod = self.module(modname)
if mod is not None:
mod.click_updated()
def click(self, latlng):
if latlng is None:
self.click_location = None
self.click_time = None
self.notify_click()
return
(lat, lng) = latlng
if lat is None:
print("Bad Lat")
return
if lng is None:
print("Bad lng")
return
self.click_location = (lat, lng)
self.click_time = time.time()
self.notify_click()
def get_mav_param(param, default=None):
'''return a EEPROM parameter value'''
return mpstate.mav_param.get(param, default)
def param_set(name, value, retries=3):
'''set a parameter'''
name = name.upper()
return mpstate.mav_param.mavset(mpstate.master(), name, value, retries=retries)
def cmd_script(args):
'''run a script'''
if len(args) < 1:
print("usage: script <filename>")
return
run_script(args[0])
def cmd_set(args):
'''control mavproxy options'''
mpstate.settings.command(args)
def cmd_status(args):
'''show status'''
verbose = False
if "--verbose" in args:
verbose = True
args = list(filter(lambda x : x != "--verbose", args))
if len(args) == 0:
mpstate.status.show(sys.stdout, pattern=None, verbose=verbose)
else:
for pattern in args:
mpstate.status.show(sys.stdout, pattern=pattern, verbose=verbose)
def cmd_setup(args):
mpstate.status.setup_mode = True
mpstate.rl.set_prompt("")
def cmd_reset(args):
print("Resetting master")
mpstate.master().reset()
def cmd_click(args):
'''synthesise click at lat/lon; no arguments is "unclick"'''
if len(args) == 0:
mpstate.click(None)
return
if len(args) < 2:
print("click LAT_EXPRESSION LNG_EXPRESSION")
return
lat = mavutil.evaluate_expression(args[0], mpstate.master().messages)
lng = mavutil.evaluate_expression(args[1], mpstate.master().messages)
mpstate.click((lat, lng))
def cmd_watch(args):
'''watch a mavlink packet pattern'''
if len(args) == 0:
mpstate.status.watch = None
return
mpstate.status.watch = args
print("Watching %s" % mpstate.status.watch)
def generate_kwargs(args):
kwargs = {}
module_components = args.split(":{", 1)
module_name = module_components[0]
if (len(module_components) == 2 and module_components[1].endswith("}")):
# assume json
try:
module_args = "{"+module_components[1]
kwargs = json.loads(module_args)
except ValueError as e:
print('Invalid JSON argument: {0} ({1})'.format(module_args,
repr(e)))
return (module_name, kwargs)
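# Illustrative behaviour (comment added; the module name and keys below are
# made up):
#   generate_kwargs('mymod')                      -> ('mymod', {})
#   generate_kwargs('mymod:{"foo": 1, "bar": 2}') -> ('mymod', {'foo': 1, 'bar': 2})
# i.e. everything after the first ":{" is parsed as a JSON object and later
# passed to the module's init() as keyword arguments.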
def get_exception_stacktrace(e):
if sys.version_info[0] >= 3:
ret = "%s\n" % e
ret += ''.join(traceback.format_exception(etype=type(e),
value=e,
tb=e.__traceback__))
return ret
return traceback.format_exc(e)
def load_module(modname, quiet=False, **kwargs):
'''load a module'''
modpaths = ['MAVProxy.modules.mavproxy_%s' % modname, modname]
for (m,pm) in mpstate.modules:
if m.name == modname and not modname in mpstate.multi_instance:
if not quiet:
print("module %s already loaded" % modname)
# don't report an error
return True
ex = None
for modpath in modpaths:
try:
m = import_package(modpath)
reload(m)
module = m.init(mpstate, **kwargs)
if isinstance(module, mp_module.MPModule):
mpstate.modules.append((module, m))
if not quiet:
if kwargs:
print("Loaded module %s with kwargs = %s" % (modname, kwargs))
else:
print("Loaded module %s" % (modname,))
return True
else:
ex = "%s.init did not return a MPModule instance" % modname
break
except ImportError as msg:
ex = msg
if mpstate.settings.moddebug > 1:
print(get_exception_stacktrace(ex))
help_traceback = ""
if mpstate.settings.moddebug < 3:
help_traceback = " Use 'set moddebug 3' in the MAVProxy console to enable traceback"
print("Failed to load module: %s.%s" % (ex, help_traceback))
return False
def unload_module(modname):
'''unload a module'''
for (m,pm) in mpstate.modules:
if m.name == modname:
if hasattr(m, 'unload'):
t = threading.Thread(target=lambda : m.unload(), name="unload %s" % modname)
t.start()
t.join(timeout=5)
if t.is_alive():
print("unload on module %s did not complete" % m.name)
mpstate.modules.remove((m,pm))
return False
mpstate.modules.remove((m,pm))
if modname in mpstate.public_modules:
del mpstate.public_modules[modname]
print("Unloaded module %s" % modname)
return True
print("Unable to find module %s" % modname)
return False
def cmd_module(args):
'''module commands'''
usage = "usage: module <list|load|reload|unload>"
if len(args) < 1:
print(usage)
return
if args[0] == "list":
mods = []
for (m,pm) in mpstate.modules:
mods.append(m)
mods = sorted(mods, key=lambda m : m.name)
for m in mods:
print("%s: %s" % (m.name, m.description))
elif args[0] == "load":
if len(args) < 2:
print("usage: module load <name>")
return
(modname, kwargs) = generate_kwargs(args[1])
try:
load_module(modname, **kwargs)
except TypeError as ex:
print(ex)
print("%s module does not support keyword arguments"% modname)
return
elif args[0] == "reload":
if len(args) < 2:
print("usage: module reload <name>")
return
(modname, kwargs) = generate_kwargs(args[1])
pmodule = None
for (m,pm) in mpstate.modules:
if m.name == modname:
pmodule = pm
if pmodule is None:
print("Module %s not loaded" % modname)
return
if unload_module(modname):
import zipimport
try:
reload(pmodule)
except ImportError:
clear_zipimport_cache()
reload(pmodule)
try:
if load_module(modname, quiet=True, **kwargs):
print("Reloaded module %s" % modname)
except TypeError:
print("%s module does not support keyword arguments" % modname)
elif args[0] == "unload":
if len(args) < 2:
print("usage: module unload <name>")
return
modname = os.path.basename(args[1])
unload_module(modname)
else:
print(usage)
def cmd_alias(args):
'''alias commands'''
usage = "usage: alias <add|remove|list>"
if len(args) < 1 or args[0] == "list":
if len(args) >= 2:
wildcard = args[1].upper()
else:
wildcard = '*'
for a in sorted(mpstate.aliases.keys()):
if fnmatch.fnmatch(a.upper(), wildcard):
print("%-15s : %s" % (a, mpstate.aliases[a]))
elif args[0] == "add":
if len(args) < 3:
print(usage)
return
a = args[1]
mpstate.aliases[a] = ' '.join(args[2:])
elif args[0] == "remove":
if len(args) != 2:
print(usage)
return
a = args[1]
if a in mpstate.aliases:
mpstate.aliases.pop(a)
else:
print("no alias %s" % a)
else:
print(usage)
return
def clear_zipimport_cache():
"""Clear out cached entries from _zip_directory_cache.
See http://www.digi.com/wiki/developer/index.php/Error_messages"""
import sys, zipimport
syspath_backup = list(sys.path)
zipimport._zip_directory_cache.clear()
# load back items onto sys.path
sys.path = syspath_backup
# add this too: see https://mail.python.org/pipermail/python-list/2005-May/353229.html
sys.path_importer_cache.clear()
# http://stackoverflow.com/questions/211100/pythons-import-doesnt-work-as-expected
# has info on why this is necessary.
def import_package(name):
"""Given a package name like 'foo.bar.quux', imports the package
and returns the desired module."""
import zipimport
try:
mod = __import__(name)
except ImportError:
clear_zipimport_cache()
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
command_map = {
'script' : (cmd_script, 'run a script of MAVProxy commands'),
'setup' : (cmd_setup, 'go into setup mode'),
'reset' : (cmd_reset, 'reopen the connection to the MAVLink master'),
'click' : (cmd_click, 'set click location'),
'status' : (cmd_status, 'show status'),
'set' : (cmd_set, 'mavproxy settings'),
'watch' : (cmd_watch, 'watch a MAVLink pattern'),
'module' : (cmd_module, 'module commands'),
'alias' : (cmd_alias, 'command aliases')
}
def shlex_quotes(value):
'''see http://stackoverflow.com/questions/6868382/python-shlex-split-ignore-single-quotes'''
lex = shlex.shlex(value)
lex.quotes = '"'
lex.whitespace_split = True
lex.commenters = ''
return list(lex)
def process_stdin(line):
'''handle commands from user'''
if line is None:
sys.exit(0)
# allow for modules to override input handling
if mpstate.functions.input_handler is not None:
mpstate.functions.input_handler(line)
return
line = line.strip()
if mpstate.status.setup_mode:
# in setup mode we send strings straight to the master
if line == '.':
mpstate.status.setup_mode = False
mpstate.status.flightmode = "MAV"
mpstate.rl.set_prompt("MAV> ")
return
if line != '+++':
line += '\r'
for c in line:
time.sleep(0.01)
if sys.version_info.major >= 3:
mpstate.master().write(bytes(c, "ascii"))
else:
mpstate.master().write(c)
return
if not line:
return
try:
args = shlex_quotes(line)
except Exception as e:
print("Caught shlex exception: %s" % e.message);
return
cmd = args[0]
while cmd in mpstate.aliases:
line = mpstate.aliases[cmd]
args = shlex.split(line) + args[1:]
cmd = args[0]
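# Example (comment added; the alias name is made up): after
# "alias add wpl wp list", typing "wpl 3" expands here to
# args = ['wp', 'list', '3'] before command dispatch.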
if cmd == 'help':
k = command_map.keys()
k = sorted(k)
for cmd in k:
(fn, help) = command_map[cmd]
print("%-15s : %s" % (cmd, help))
return
if cmd == 'exit' and mpstate.settings.requireexit:
mpstate.status.exit = True
return
if not cmd in command_map:
for (m,pm) in mpstate.modules:
if hasattr(m, 'unknown_command'):
try:
if m.unknown_command(args):
return
except Exception as e:
print("ERROR in command: %s" % str(e))
print("Unknown command '%s'" % line)
return
(fn, help) = command_map[cmd]
try:
fn(args[1:])
except Exception as e:
print("ERROR in command %s: %s" % (args[1:], str(e)))
if mpstate.settings.moddebug > 1:
traceback.print_exc()
def process_master(m):
'''process packets from the MAVLink master'''
try:
s = m.recv(16*1024)
except Exception:
time.sleep(0.1)
return
# prevent a dead serial port from causing the CPU to spin. The user hitting enter will
# cause it to try and reconnect
if len(s) == 0:
time.sleep(0.1)
return
mpstate.status.bytecounters['MasterIn'][m.linknum].update(len(s))
if (mpstate.settings.compdebug & 1) != 0:
return
if mpstate.logqueue_raw:
mpstate.logqueue_raw.put(bytearray(s))
if mpstate.status.setup_mode:
if mpstate.system == 'Windows':
# strip nsh ansi codes
s = s.replace("\033[K","")
if sys.version_info.major >= 3:
sys.stdout.write(str(s, "ascii", "ignore"))
else:
sys.stdout.write(str(s))
sys.stdout.flush()
return
global mavversion
if m.first_byte and mavversion is None:
m.auto_mavlink_version(s)
msgs = m.mav.parse_buffer(s)
if msgs:
for msg in msgs:
sysid = msg.get_srcSystem()
if sysid in mpstate.sysid_outputs:
# the message has been handled by a specialised handler for this system
continue
if getattr(m, '_timestamp', None) is None:
m.post_message(msg)
if msg.get_type() == "BAD_DATA":
if opts.show_errors:
mpstate.console.writeln("MAV error: %s" % msg)
mpstate.status.mav_error += 1
def process_mavlink(slave):
'''process packets from MAVLink slaves, forwarding to the master'''
try:
buf = slave.recv()
except socket.error:
return
try:
global mavversion
if slave.first_byte and mavversion is None:
slave.auto_mavlink_version(buf)
msgs = slave.mav.parse_buffer(buf)
except mavutil.mavlink.MAVError as e:
mpstate.console.error("Bad MAVLink slave message from %s: %s" % (slave.address, e.message))
return
if msgs is None:
return
if mpstate.settings.mavfwd and not mpstate.status.setup_mode:
for m in msgs:
target_sysid = getattr(m, 'target_system', -1)
mbuf = m.get_msgbuf()
mpstate.master(target_sysid).write(mbuf)
if mpstate.logqueue:
usec = int(time.time() * 1.0e6)
mpstate.logqueue.put(bytearray(struct.pack('>Q', usec) + m.get_msgbuf()))
if mpstate.status.watch:
for msg_type in mpstate.status.watch:
if fnmatch.fnmatch(m.get_type().upper(), msg_type.upper()):
mpstate.console.writeln('> '+ str(m))
break
mpstate.status.counters['Slave'] += 1
def mkdir_p(dir):
'''like mkdir -p'''
if not dir:
return
if dir.endswith("/"):
mkdir_p(dir[:-1])
return
if os.path.isdir(dir):
return
mkdir_p(os.path.dirname(dir))
os.mkdir(dir)
def log_writer():
'''log writing thread'''
while True:
mpstate.logfile_raw.write(bytearray(mpstate.logqueue_raw.get()))
timeout = time.time() + 10
while not mpstate.logqueue_raw.empty() and time.time() < timeout:
mpstate.logfile_raw.write(mpstate.logqueue_raw.get())
while not mpstate.logqueue.empty() and time.time() < timeout:
mpstate.logfile.write(mpstate.logqueue.get())
if mpstate.settings.flushlogs or time.time() >= timeout:
mpstate.logfile.flush()
mpstate.logfile_raw.flush()
# If state_basedir is NOT set then paths for logs and aircraft
# directories are relative to mavproxy's cwd
def log_paths():
'''Returns tuple (logdir, telemetry_log_filepath, raw_telemetry_log_filepath)'''
if opts.aircraft is not None:
dirname = ""
if opts.mission is not None:
print(opts.mission)
dirname += "%s/logs/%s/Mission%s" % (opts.aircraft, time.strftime("%Y-%m-%d"), opts.mission)
else:
dirname += "%s/logs/%s" % (opts.aircraft, time.strftime("%Y-%m-%d"))
# dirname is currently relative. Possibly add state_basedir:
if mpstate.settings.state_basedir is not None:
dirname = os.path.join(mpstate.settings.state_basedir,dirname)
mkdir_p(dirname)
highest = None
for i in range(1, 10000):
fdir = os.path.join(dirname, 'flight%u' % i)
if not os.path.exists(fdir):
break
highest = fdir
if mpstate.continue_mode and highest is not None:
fdir = highest
elif os.path.exists(fdir):
print("Flight logs full")
sys.exit(1)
logname = 'flight.tlog'
logdir = fdir
else:
logname = os.path.basename(opts.logfile)
dir_path = os.path.dirname(opts.logfile)
if not os.path.isabs(dir_path) and mpstate.settings.state_basedir is not None:
dir_path = os.path.join(mpstate.settings.state_basedir,dir_path)
logdir = dir_path
mkdir_p(logdir)
return (logdir,
os.path.join(logdir, logname),
os.path.join(logdir, logname + '.raw'))
def open_telemetry_logs(logpath_telem, logpath_telem_raw):
'''open log files'''
if opts.append_log or opts.continue_mode:
mode = 'ab'
else:
mode = 'wb'
try:
mpstate.logfile = open(logpath_telem, mode=mode)
mpstate.logfile_raw = open(logpath_telem_raw, mode=mode)
print("Log Directory: %s" % mpstate.status.logdir)
print("Telemetry log: %s" % logpath_telem)
#make sure there's enough free disk space for the logfile (>200Mb)
#statvfs doesn't work in Windows
if platform.system() != 'Windows':
stat = os.statvfs(logpath_telem)
if stat.f_bfree*stat.f_bsize < 209715200:
print("ERROR: Not enough free disk space for logfile")
mpstate.status.exit = True
return
# use a separate thread for writing to the logfile to prevent
# delays during disk writes (important as delays can be long if camera
# app is running)
t = threading.Thread(target=log_writer, name='log_writer')
t.daemon = True
t.start()
except Exception as e:
print("ERROR: opening log file for writing: %s" % e)
mpstate.status.exit = True
return
def set_stream_rates():
'''set mavlink stream rates'''
if (not msg_period.trigger() and
mpstate.status.last_streamrate1 == mpstate.settings.streamrate and
mpstate.status.last_streamrate2 == mpstate.settings.streamrate2):
return
mpstate.status.last_streamrate1 = mpstate.settings.streamrate
mpstate.status.last_streamrate2 = mpstate.settings.streamrate2
for master in mpstate.mav_master:
if master.linknum == 0:
rate = mpstate.settings.streamrate
else:
rate = mpstate.settings.streamrate2
if rate != -1 and mpstate.settings.streamrate != -1:
master.mav.request_data_stream_send(mpstate.settings.target_system, mpstate.settings.target_component,
mavutil.mavlink.MAV_DATA_STREAM_ALL,
rate, 1)
def check_link_status():
'''check status of master links'''
tnow = time.time()
if mpstate.status.last_message != 0 and tnow > mpstate.status.last_message + 5:
say("no link")
mpstate.status.heartbeat_error = True
for master in mpstate.mav_master:
if not master.linkerror and (tnow > master.last_message + 5 or master.portdead):
say("link %s down" % (mp_module.MPModule.link_label(master)))
master.linkerror = True
def send_heartbeat(master):
if master.mavlink10():
master.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_GCS, mavutil.mavlink.MAV_AUTOPILOT_INVALID,
0, 0, 0)
else:
MAV_GROUND = 5
MAV_AUTOPILOT_NONE = 4
master.mav.heartbeat_send(MAV_GROUND, MAV_AUTOPILOT_NONE)
def periodic_tasks():
'''run periodic checks'''
if mpstate.status.setup_mode:
return
if (mpstate.settings.compdebug & 2) != 0:
return
if mpstate.settings.heartbeat != 0:
heartbeat_period.frequency = mpstate.settings.heartbeat
if heartbeat_period.trigger() and mpstate.settings.heartbeat != 0:
mpstate.status.counters['MasterOut'] += 1
for master in mpstate.mav_master:
send_heartbeat(master)
if heartbeat_check_period.trigger():
check_link_status()
set_stream_rates()
mpstate.status.update_bytecounters()
# call optional module idle tasks. These are called at several hundred Hz
for (m,pm) in mpstate.modules:
if hasattr(m, 'idle_task'):
try:
m.idle_task()
except Exception as msg:
if mpstate.settings.moddebug == 1:
print(msg)
elif mpstate.settings.moddebug > 1:
print(get_exception_stacktrace(msg))
# also see if the module should be unloaded:
if m.needs_unloading:
unload_module(m.name)
def main_loop():
'''main processing loop'''
global screensaver_cookie
if not mpstate.status.setup_mode and not opts.nowait:
for master in mpstate.mav_master:
if master.linknum != 0:
break
print("Waiting for heartbeat from %s" % master.address)
send_heartbeat(master)
master.wait_heartbeat(timeout=0.1)
set_stream_rates()
while True:
if mpstate is None or mpstate.status.exit:
return
# enable or disable screensaver:
if (mpstate.settings.inhibit_screensaver_when_armed and
screensaver_interface is not None):
if mpstate.status.armed and screensaver_cookie is None:
# now we can inhibit the screensaver
screensaver_cookie = screensaver_interface.Inhibit("MAVProxy",
"Vehicle is armed")
elif not mpstate.status.armed and screensaver_cookie is not None:
# we can also restore it
screensaver_interface.UnInhibit(screensaver_cookie)
screensaver_cookie = None
while not mpstate.input_queue.empty():
line = mpstate.input_queue.get()
mpstate.input_count += 1
cmds = line.split(';')
if len(cmds) == 1 and cmds[0] == "":
mpstate.empty_input_count += 1
for c in cmds:
process_stdin(c)
for master in mpstate.mav_master:
if master.fd is None:
if master.port.inWaiting() > 0:
process_master(master)
periodic_tasks()
rin = []
for master in mpstate.mav_master:
if master.fd is not None and not master.portdead:
rin.append(master.fd)
for m in mpstate.mav_outputs:
rin.append(m.fd)
for sysid in mpstate.sysid_outputs:
m = mpstate.sysid_outputs[sysid]
rin.append(m.fd)
if rin == []:
time.sleep(0.0001)
continue
for fd in mpstate.select_extra:
rin.append(fd)
try:
(rin, win, xin) = select.select(rin, [], [], mpstate.settings.select_timeout)
except select.error:
continue
if mpstate is None:
return
for fd in rin:
if mpstate is None:
return
for master in mpstate.mav_master:
if fd == master.fd:
process_master(master)
if mpstate is None:
return
continue
for m in mpstate.mav_outputs:
if fd == m.fd:
process_mavlink(m)
if mpstate is None:
return
continue
for sysid in mpstate.sysid_outputs:
m = mpstate.sysid_outputs[sysid]
if fd == m.fd:
process_mavlink(m)
if mpstate is None:
return
continue
# this allow modules to register their own file descriptors
# for the main select loop
if fd in mpstate.select_extra:
try:
# call the registered read function
(fn, args) = mpstate.select_extra[fd]
fn(args)
except Exception as msg:
if mpstate.settings.moddebug == 1:
print(msg)
# on an exception, remove it from the select list
mpstate.select_extra.pop(fd)
def input_loop():
'''wait for user input'''
while mpstate.status.exit != True:
try:
line = mpstate.rl.input()
mpstate.input_queue.put(line)
except (EOFError, IOError):
mpstate.status.exit = True
def run_script(scriptfile):
'''run a script file'''
try:
f = open(scriptfile, mode='r')
except Exception:
return
mpstate.console.writeln("Running script %s" % scriptfile)
sub = mp_substitute.MAVSubstitute()
for line in f:
line = line.strip()
if line == "" or line.startswith('#'):
continue
try:
line = sub.substitute(line, os.environ)
except mp_substitute.MAVSubstituteError as ex:
print("Bad variable: %s" % str(ex))
if mpstate.settings.script_fatal:
sys.exit(1)
continue
if line.startswith('@'):
line = line[1:]
else:
mpstate.console.writeln("-> %s" % line)
process_stdin(line)
f.close()
def set_mav_version(mav10, mav20, autoProtocol, mavversionArg):
'''Set the Mavlink version based on commandline options'''
# if(mav10 == True or mav20 == True or autoProtocol == True):
# print("Warning: Using deprecated --mav10, --mav20 or --auto-protocol options. Use --mavversion instead")
#sanity check the options
if (mav10 == True or mav20 == True) and autoProtocol == True:
print("Error: Can't have [--mav10, --mav20] and --auto-protocol both True")
sys.exit(1)
if mav10 == True and mav20 == True:
print("Error: Can't have --mav10 and --mav20 both True")
sys.exit(1)
if mavversionArg is not None and (mav10 == True or mav20 == True or autoProtocol == True):
print("Error: Can't use --mavversion with legacy (--mav10, --mav20 or --auto-protocol) options")
sys.exit(1)
#and set the specific mavlink version (False = autodetect)
global mavversion
if mavversionArg == "1.0" or mav10 == True:
os.environ['MAVLINK09'] = '1'
mavversion = "1"
else:
os.environ['MAVLINK20'] = '1'
mavversion = "2"
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser("mavproxy.py [options]")
parser.add_option("--master", dest="master", action='append',
metavar="DEVICE[,BAUD]", help="MAVLink master port and optional baud rate",
default=[])
parser.add_option("", "--force-connected", dest="force_connected", help="Use master even if initial connection fails",
action='store_true', default=False)
parser.add_option("--out", dest="output", action='append',
metavar="DEVICE[,BAUD]", help="MAVLink output port and optional baud rate",
default=[])
parser.add_option("--baudrate", dest="baudrate", type='int',
help="default serial baud rate", default=57600)
parser.add_option("--sitl", dest="sitl", default=None, help="SITL output port")
parser.add_option("--streamrate",dest="streamrate", default=4, type='int',
help="MAVLink stream rate")
parser.add_option("--source-system", dest='SOURCE_SYSTEM', type='int',
default=255, help='MAVLink source system for this GCS')
parser.add_option("--source-component", dest='SOURCE_COMPONENT', type='int',
default=230, help='MAVLink source component for this GCS')
parser.add_option("--target-system", dest='TARGET_SYSTEM', type='int',
default=0, help='MAVLink target master system')
parser.add_option("--target-component", dest='TARGET_COMPONENT', type='int',
default=0, help='MAVLink target master component')
parser.add_option("--logfile", dest="logfile", help="MAVLink master logfile",
default='mav.tlog')
parser.add_option("-a", "--append-log", dest="append_log", help="Append to log files",
action='store_true', default=False)
parser.add_option("--quadcopter", dest="quadcopter", help="use quadcopter controls",
action='store_true', default=False)
parser.add_option("--setup", dest="setup", help="start in setup mode",
action='store_true', default=False)
parser.add_option("--nodtr", dest="nodtr", help="disable DTR drop on close",
action='store_true', default=False)
parser.add_option("--show-errors", dest="show_errors", help="show MAVLink error packets",
action='store_true', default=False)
parser.add_option("--speech", dest="speech", help="use text to speech",
action='store_true', default=False)
parser.add_option("--aircraft", dest="aircraft", help="aircraft name", default=None)
parser.add_option("--cmd", dest="cmd", help="initial commands", default=None, action='append')
parser.add_option("--console", action='store_true', help="use GUI console")
if platform.system() == 'Windows':
parser.add_option("--no-console", action='store_true', help="don't use GUI console")
parser.add_option("--map", action='store_true', help="load map module")
parser.add_option(
'--load-module',
action='append',
default=[],
help='Load the specified module. Can be used multiple times, or with a comma separated list')
parser.add_option("--mav10", action='store_true', default=False, help="Use MAVLink protocol 1.0")
parser.add_option("--mav20", action='store_true', default=False, help="Use MAVLink protocol 2.0")
parser.add_option("--auto-protocol", action='store_true', default=False, help="Auto detect MAVLink protocol version")
parser.add_option("--mavversion", type='choice', choices=['1.0', '2.0'] , help="Force MAVLink Version (1.0, 2.0). Otherwise autodetect version")
parser.add_option("--nowait", action='store_true', default=False, help="don't wait for HEARTBEAT on startup")
parser.add_option("-c", "--continue", dest='continue_mode', action='store_true', default=False, help="continue logs")
parser.add_option("--dialect", default="ardupilotmega", help="MAVLink dialect")
parser.add_option("--rtscts", action='store_true', help="enable hardware RTS/CTS flow control")
parser.add_option("--moddebug", type=int, help="module debug level", default=0)
parser.add_option("--mission", dest="mission", help="mission name", default=None)
parser.add_option("--daemon", action='store_true', help="run in daemon mode, do not start interactive shell")
parser.add_option("--non-interactive", action='store_true', help="do not start interactive shell")
parser.add_option("--profile", action='store_true', help="run the Yappi python profiler")
parser.add_option("--state-basedir", default=None, help="base directory for logs and aircraft directories")
parser.add_option("--version", action='store_true', help="version information")
parser.add_option("--default-modules", default="log,signing,wp,rally,fence,ftp,param,relay,tuneopt,arm,mode,calibration,rc,auxopt,misc,cmdlong,battery,terrain,output,adsb,layout", help='default module list')
(opts, args) = parser.parse_args()
if len(args) != 0:
print("ERROR: mavproxy takes no position arguments; got (%s)" % str(args))
sys.exit(1)
# warn people about ModemManager which interferes badly with APM and Pixhawk
if os.path.exists("/usr/sbin/ModemManager"):
print("WARNING: You should uninstall ModemManager as it conflicts with APM and Pixhawk")
#set the Mavlink version, if required
set_mav_version(opts.mav10, opts.mav20, opts.auto_protocol, opts.mavversion)
from pymavlink import mavutil, mavparm
mavutil.set_dialect(opts.dialect)
#version information
if opts.version:
#pkg_resources doesn't work in the windows exe build, so read the version file
try:
import pkg_resources
version = pkg_resources.require("mavproxy")[0].version
except:
start_script = mp_util.dot_mavproxy("version.txt")
f = open(start_script, 'r')
version = f.readline()
print("MAVProxy is a modular ground station using the mavlink protocol")
print("MAVProxy Version: " + version)
sys.exit(0)
# global mavproxy state
mpstate = MPState()
mpstate.status.exit = False
mpstate.command_map = command_map
mpstate.continue_mode = opts.continue_mode
# queues for logging
mpstate.logqueue = Queue.Queue()
mpstate.logqueue_raw = Queue.Queue()
if opts.speech:
# start the speech-dispatcher early, so it doesn't inherit any ports from
# modules/mavutil
load_module('speech')
serial_list = mavutil.auto_detect_serial(preferred_list=preferred_ports)
serial_list.sort(key=lambda x: x.device)
# remove OTG2 ports for dual CDC
if len(serial_list) == 2 and serial_list[0].device.startswith("/dev/serial/by-id"):
if serial_list[0].device[:-1] == serial_list[1].device[0:-1]:
serial_list.pop(1)
if not opts.master:
print('Auto-detected serial ports are:')
for port in serial_list:
print("%s" % port)
# container for status information
mpstate.settings.target_system = opts.TARGET_SYSTEM
mpstate.settings.target_component = opts.TARGET_COMPONENT
mpstate.mav_master = []
mpstate.rl = rline.rline("MAV> ", mpstate)
def quit_handler(signum = None, frame = None):
#print('Signal handler called with signal', signum)
if mpstate.status.exit:
print('Clean shutdown impossible, forcing an exit')
sys.exit(0)
else:
mpstate.status.exit = True
# Listen for kill signals to cleanly shutdown modules
fatalsignals = [signal.SIGTERM]
try:
fatalsignals.append(signal.SIGQUIT)
signal.signal(signal.SIGHUP, signal.SIG_IGN)
except Exception:
pass
if opts.daemon or opts.non_interactive: # SIGINT breaks readline parsing - if we are interactive, just let things die
fatalsignals.append(signal.SIGINT)
for sig in fatalsignals:
signal.signal(sig, quit_handler)
load_module('link', quiet=True)
mpstate.settings.source_system = opts.SOURCE_SYSTEM
mpstate.settings.source_component = opts.SOURCE_COMPONENT
# open master link
for mdev in opts.master:
if not mpstate.module('link').link_add(mdev, force_connected=opts.force_connected):
sys.exit(1)
if not opts.master and len(serial_list) == 1:
print("Connecting to %s" % serial_list[0])
mpstate.module('link').link_add(serial_list[0].device)
elif not opts.master and len(serial_list) > 1:
print("Warning: multiple possible serial ports. Use console GUI or 'link add' to add port, or restart using --master to select a single port")
#if no display, assume running CLI mode and exit
if platform.system() != 'Windows' and "DISPLAY" not in os.environ:
sys.exit(1)
elif not opts.master:
wifi_device = '0.0.0.0:14550'
mpstate.module('link').link_add(wifi_device)
# open any mavlink output ports
for port in opts.output:
mpstate.mav_outputs.append(mavutil.mavlink_connection(port, baud=int(opts.baudrate), input=False))
if opts.sitl:
mpstate.sitl_output = mavutil.mavudp(opts.sitl, input=False)
mpstate.settings.streamrate = opts.streamrate
mpstate.settings.streamrate2 = opts.streamrate
if opts.state_basedir is not None:
mpstate.settings.state_basedir = opts.state_basedir
msg_period = mavutil.periodic_event(1.0/15)
heartbeat_period = mavutil.periodic_event(1)
heartbeat_check_period = mavutil.periodic_event(0.33)
mpstate.input_queue = Queue.Queue()
mpstate.input_count = 0
mpstate.empty_input_count = 0
if opts.setup:
mpstate.rl.set_prompt("")
# call this early so that logdir is setup based on --aircraft
(mpstate.status.logdir, logpath_telem, logpath_telem_raw) = log_paths()
for module in opts.load_module:
modlist = module.split(',')
for mod in modlist:
process_stdin('module load %s' % (mod))
if not opts.setup:
# some core functionality is in modules
standard_modules = opts.default_modules.split(',')
for m in standard_modules:
load_module(m, quiet=True)
if platform.system() != 'Windows':
if opts.console:
process_stdin('module load console')
else:
# default to having console on windows
if opts.console or not opts.no_console:
process_stdin('module load console')
if opts.map:
process_stdin('module load map')
start_scripts = []
if not opts.setup:
if 'HOME' in os.environ:
start_scripts.append(os.path.join(os.environ['HOME'], ".mavinit.scr"))
start_script = mp_util.dot_mavproxy("mavinit.scr")
start_scripts.append(start_script)
if (mpstate.settings.state_basedir is not None and
opts.aircraft is not None):
start_script = os.path.join(mpstate.settings.state_basedir, opts.aircraft, "mavinit.scr")
start_scripts.append(start_script)
for start_script in start_scripts:
if os.path.exists(start_script):
print("Running script (%s)" % (start_script))
run_script(start_script)
if opts.aircraft is not None:
start_script = os.path.join(opts.aircraft, "mavinit.scr")
if os.path.exists(start_script):
run_script(start_script)
else:
print("no script %s" % start_script)
if opts.cmd is not None:
for cstr in opts.cmd:
cmds = cstr.split(';')
for c in cmds:
process_stdin(c)
if opts.profile:
import yappi # We do the import here so that we won't barf if run normally and yappi not available
yappi.start()
# log all packets from the master, for later replay
open_telemetry_logs(logpath_telem, logpath_telem_raw)
# run main loop as a thread
mpstate.status.thread = threading.Thread(target=main_loop, name='main_loop')
mpstate.status.thread.daemon = True
mpstate.status.thread.start()
# use main program for input. This ensures the terminal cleans
# up on exit
while (mpstate.status.exit != True):
try:
if opts.daemon or opts.non_interactive:
time.sleep(0.1)
else:
input_loop()
except KeyboardInterrupt:
if mpstate.settings.requireexit:
print("Interrupt caught. Use 'exit' to quit MAVProxy.")
#Just lost the map and console, get them back:
for (m,pm) in mpstate.modules:
if m.name in ["map", "console"]:
if hasattr(m, 'unload'):
try:
m.unload()
except Exception:
pass
reload(m)
m.init(mpstate)
else:
mpstate.status.exit = True
sys.exit(1)
if opts.profile:
yappi.get_func_stats().print_all()
yappi.get_thread_stats().print_all()
#this loop executes after leaving the above loop and is for cleanup on exit
for (m,pm) in mpstate.modules:
if hasattr(m, 'unload'):
print("Unloading module %s" % m.name)
m.unload()
sys.exit(1)
| gpl-3.0 |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/decomposition/tests/test_kernel_pca.py | 74 | 8472 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed.size, 0)
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_consistent_transform():
# X_fit_ needs to retain the old, unmodified copy of X
state = np.random.RandomState(0)
X = state.rand(10, 10)
kpca = KernelPCA(random_state=state).fit(X)
transformed1 = kpca.transform(X)
X_copy = X.copy()
X[:, 0] = 666
transformed2 = kpca.transform(X_copy)
assert_array_almost_equal(transformed1, transformed2)
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
| mit |
f3r/scikit-learn | sklearn/neighbors/nearest_centroid.py | 38 | 7356 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
from ..utils.multiclass import check_classification_targets
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
    metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
        The centroid for the samples corresponding to each class is the point
        from which the sum of the distances (according to the metric) of all
        samples that belong to that particular class is minimized.
        If the "manhattan" metric is provided, this centroid is the median;
        for all other metrics, the centroid is set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
        # If X is sparse and the metric is "manhattan", store it in csc
        # format, as that makes it easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
check_classification_targets(y)
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
        # Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
# Number of clusters in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
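# Illustrative shrunken-centroid usage (exposition only; the data are made up
# and the expected label is inferred from the geometry, so treat the output
# as an assumption rather than a doctest):
#
#     >>> X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
#     >>> y = np.array([1, 1, 1, 2, 2, 2])
#     >>> clf = NearestCentroid(shrink_threshold=0.2).fit(X, y)
#     >>> clf.predict([[2, 2]])
#     array([2])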
| bsd-3-clause |
dingliumath/quant-econ | examples/lqramsey.py | 3 | 9946 | """
Filename: lqramsey.py
Authors: Thomas Sargent, Doc-Jin Jang, Jeong-hun Choi, John Stachurski
This module provides code to compute Ramsey equilibria in a LQ economy with
distortionary taxation. The program computes allocations (consumption,
leisure), tax rates, revenues, the net present value of the debt and other
related quantities.
Functions for plotting the results are also provided below.
See the lecture at http://quant-econ.net/lqramsey.html for a description of
the model.
"""
import sys
import numpy as np
from numpy import sqrt, eye, dot, zeros, cumsum
from numpy.random import randn
import scipy.linalg
import matplotlib.pyplot as plt
from collections import namedtuple
from quantecon import nullspace, mc_sample_path, var_quadratic_sum
# == Set up a namedtuple to store data on the model economy == #
Economy = namedtuple('economy',
('beta', # Discount factor
'Sg', # Govt spending selector matrix
'Sd', # Exogenous endowment selector matrix
'Sb', # Utility parameter selector matrix
'Ss', # Coupon payments selector matrix
'discrete', # Discrete or continuous -- boolean
'proc')) # Stochastic process parameters
# == Set up a namedtuple to store return values for compute_paths() == #
Path = namedtuple('path',
('g', # Govt spending
'd', # Endowment
'b', # Utility shift parameter
's', # Coupon payment on existing debt
'c', # Consumption
'l', # Labor
'p', # Price
'tau', # Tax rate
'rvn', # Revenue
'B', # Govt debt
'R', # Risk free gross return
'pi', # One-period risk-free interest rate
'Pi', # Cumulative rate of return, adjusted
'xi')) # Adjustment factor for Pi
def compute_paths(T, econ):
"""
Compute simulated time paths for exogenous and endogenous variables.
Parameters
===========
T: int
Length of the simulation
econ: a namedtuple of type 'Economy', containing
beta - Discount factor
Sg - Govt spending selector matrix
Sd - Exogenous endowment selector matrix
Sb - Utility parameter selector matrix
Ss - Coupon payments selector matrix
discrete - Discrete exogenous process (True or False)
proc - Stochastic process parameters
Returns
========
path: a namedtuple of type 'Path', containing
g - Govt spending
d - Endowment
b - Utility shift parameter
s - Coupon payment on existing debt
c - Consumption
l - Labor
p - Price
tau - Tax rate
rvn - Revenue
B - Govt debt
R - Risk free gross return
pi - One-period risk-free interest rate
Pi - Cumulative rate of return, adjusted
xi - Adjustment factor for Pi
The corresponding values are flat numpy ndarrays.
"""
# == Simplify names == #
beta, Sg, Sd, Sb, Ss = econ.beta, econ.Sg, econ.Sd, econ.Sb, econ.Ss
if econ.discrete:
P, x_vals = econ.proc
else:
A, C = econ.proc
# == Simulate the exogenous process x == #
if econ.discrete:
state = mc_sample_path(P, init=0, sample_size=T)
x = x_vals[:, state]
else:
# == Generate an initial condition x0 satisfying x0 = A x0 == #
nx, nx = A.shape
x0 = nullspace((eye(nx) - A))
x0 = -x0 if (x0[nx-1] < 0) else x0
x0 = x0 / x0[nx-1]
# == Generate a time series x of length T starting from x0 == #
nx, nw = C.shape
x = zeros((nx, T))
w = randn(nw, T)
x[:, 0] = x0.T
for t in range(1, T):
x[:, t] = dot(A, x[:, t-1]) + dot(C, w[:, t])
# == Compute exogenous variable sequences == #
g, d, b, s = (dot(S, x).flatten() for S in (Sg, Sd, Sb, Ss))
# == Solve for Lagrange multiplier in the govt budget constraint == #
# In fact we solve for nu = lambda / (1 + 2*lambda). Here nu is the
# solution to a quadratic equation a(nu**2 - nu) + b = 0 where
# a and b are expected discounted sums of quadratic forms of the state.
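    # Worked restatement (added for exposition): writing the quadratic as
    # a*nu**2 - a*nu + b = 0, the quadratic formula gives
    #     nu = (a - sqrt(a**2 - 4*a*b)) / (2*a)
    # for the smaller root, which is exactly what is computed below from
    # disc = a0**2 - 4*a0*b0.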
Sm = Sb - Sd - Ss
# == Compute a and b == #
if econ.discrete:
ns = P.shape[0]
F = scipy.linalg.inv(np.identity(ns) - beta * P)
a0 = 0.5 * dot(F, dot(Sm, x_vals).T**2)[0]
H = dot(Sb - Sd + Sg, x_vals) * dot(Sg - Ss, x_vals)
b0 = 0.5 * dot(F, H.T)[0]
a0, b0 = float(a0), float(b0)
else:
H = dot(Sm.T, Sm)
a0 = 0.5 * var_quadratic_sum(A, C, H, beta, x0)
H = dot((Sb - Sd + Sg).T, (Sg + Ss))
b0 = 0.5 * var_quadratic_sum(A, C, H, beta, x0)
# == Test that nu has a real solution before assigning == #
warning_msg = """
Hint: you probably set government spending too {}. Elect a {}
Congress and start over.
"""
disc = a0**2 - 4 * a0 * b0
if disc >= 0:
nu = 0.5 * (a0 - sqrt(disc)) / a0
else:
print("There is no Ramsey equilibrium for these parameters.")
print(warning_msg.format('high', 'Republican'))
sys.exit(0)
# == Test that the Lagrange multiplier has the right sign == #
if nu * (0.5 - nu) < 0:
print("Negative multiplier on the government budget constraint.")
print(warning_msg.format('low', 'Democratic'))
sys.exit(0)
# == Solve for the allocation given nu and x == #
Sc = 0.5 * (Sb + Sd - Sg - nu * Sm)
Sl = 0.5 * (Sb - Sd + Sg - nu * Sm)
c = dot(Sc, x).flatten()
l = dot(Sl, x).flatten()
p = dot(Sb - Sc, x).flatten() # Price without normalization
tau = 1 - l / (b - c)
rvn = l * tau
# == Compute remaining variables == #
if econ.discrete:
H = dot(Sb - Sc, x_vals) * dot(Sl - Sg, x_vals) - dot(Sl, x_vals)**2
temp = dot(F, H.T).flatten()
B = temp[state] / p
H = dot(P[state, :], dot(Sb - Sc, x_vals).T).flatten()
R = p / (beta * H)
temp = dot(P[state, :], dot(Sb - Sc, x_vals).T).flatten()
xi = p[1:] / temp[:T-1]
else:
H = dot(Sl.T, Sl) - dot((Sb - Sc).T, Sl - Sg)
L = np.empty(T)
for t in range(T):
L[t] = var_quadratic_sum(A, C, H, beta, x[:, t])
B = L / p
Rinv = (beta * dot(dot(Sb - Sc, A), x)).flatten() / p
R = 1 / Rinv
AF1 = dot(Sb - Sc, x[:, 1:])
AF2 = dot(dot(Sb - Sc, A), x[:, :T-1])
xi = AF1 / AF2
xi = xi.flatten()
pi = B[1:] - R[:T-1] * B[:T-1] - rvn[:T-1] + g[:T-1]
Pi = cumsum(pi * xi)
# == Prepare return values == #
path = Path(g=g,
d=d,
b=b,
s=s,
c=c,
l=l,
p=p,
tau=tau,
rvn=rvn,
B=B,
R=R,
pi=pi,
Pi=Pi,
xi=xi)
return path
def gen_fig_1(path):
"""
The parameter is the path namedtuple returned by compute_paths(). See
the docstring of that function for details.
"""
T = len(path.c)
# == Prepare axes == #
num_rows, num_cols = 2, 2
fig, axes = plt.subplots(num_rows, num_cols, figsize=(14, 10))
plt.subplots_adjust(hspace=0.4)
for i in range(num_rows):
for j in range(num_cols):
axes[i, j].grid()
axes[i, j].set_xlabel(r'Time')
bbox = (0., 1.02, 1., .102)
legend_args = {'bbox_to_anchor': bbox, 'loc': 3, 'mode': 'expand'}
p_args = {'lw': 2, 'alpha': 0.7}
# == Plot consumption, govt expenditure and revenue == #
ax = axes[0, 0]
ax.plot(path.rvn, label=r'$\tau_t \ell_t$', **p_args)
ax.plot(path.g, label=r'$g_t$', **p_args)
ax.plot(path.c, label=r'$c_t$', **p_args)
ax.legend(ncol=3, **legend_args)
# == Plot govt expenditure and debt == #
ax = axes[0, 1]
ax.plot(list(range(1, T+1)), path.rvn, label=r'$\tau_t \ell_t$', **p_args)
ax.plot(list(range(1, T+1)), path.g, label=r'$g_t$', **p_args)
ax.plot(list(range(1, T)), path.B[1:T], label=r'$B_{t+1}$', **p_args)
ax.legend(ncol=3, **legend_args)
# == Plot risk free return == #
ax = axes[1, 0]
ax.plot(list(range(1, T+1)), path.R - 1, label=r'$R_t - 1$', **p_args)
ax.legend(ncol=1, **legend_args)
# == Plot revenue, expenditure and risk free rate == #
ax = axes[1, 1]
ax.plot(list(range(1, T+1)), path.rvn, label=r'$\tau_t \ell_t$', **p_args)
ax.plot(list(range(1, T+1)), path.g, label=r'$g_t$', **p_args)
axes[1, 1].plot(list(range(1, T)), path.pi, label=r'$\pi_{t+1}$', **p_args)
ax.legend(ncol=3, **legend_args)
plt.show()
def gen_fig_2(path):
"""
The parameter is the path namedtuple returned by compute_paths(). See
the docstring of that function for details.
"""
T = len(path.c)
# == Prepare axes == #
num_rows, num_cols = 2, 1
fig, axes = plt.subplots(num_rows, num_cols, figsize=(10, 10))
plt.subplots_adjust(hspace=0.5)
    bbox = (0., 1.02, 1., .102)
legend_args = {'bbox_to_anchor': bbox, 'loc': 3, 'mode': 'expand'}
p_args = {'lw': 2, 'alpha': 0.7}
# == Plot adjustment factor == #
ax = axes[0]
ax.plot(list(range(2, T+1)), path.xi, label=r'$\xi_t$', **p_args)
ax.grid()
ax.set_xlabel(r'Time')
ax.legend(ncol=1, **legend_args)
# == Plot adjusted cumulative return == #
ax = axes[1]
ax.plot(list(range(2, T+1)), path.Pi, label=r'$\Pi_t$', **p_args)
ax.grid()
ax.set_xlabel(r'Time')
ax.legend(ncol=1, **legend_args)
plt.show()
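# Illustrative usage sketch (added for exposition). The AR(1) government
# spending specification and the numbers below are modelled on the lqramsey
# lecture's continuous-state example, but treat them as assumptions rather
# than part of this module.
if __name__ == '__main__':
    # state is x = (g_t, 1)', with the constant stored last so that the
    # initial condition solved for in compute_paths() has a unit constant
    beta = 1 / 1.05
    rho, mg = .7, .35
    A = np.identity(2)
    A[0, :] = rho, mg * (1 - rho)            # g follows an AR(1) around mg
    C = np.zeros((2, 1))
    C[0, 0] = np.sqrt(1 - rho**2) * mg / 10  # innovation loading
    Sg = np.array((1, 0)).reshape(1, 2)      # g is the first state component
    Sd = np.array((0, 0)).reshape(1, 2)      # no exogenous endowment
    Sb = np.array((0, 2.135)).reshape(1, 2)  # constant utility parameter b
    Ss = np.array((0, 0)).reshape(1, 2)      # no coupon payments
    economy = Economy(beta=beta, Sg=Sg, Sd=Sd, Sb=Sb, Ss=Ss,
                      discrete=False, proc=(A, C))
    path = compute_paths(50, economy)
    gen_fig_1(path)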
| bsd-3-clause |
gwpy/gwpy | gwpy/plot/axes.py | 2 | 22408 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2018-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Extension of `~matplotlib.axes.Axes` for gwpy
"""
import warnings
from functools import wraps
from math import log
from numbers import Number
import numpy
from astropy.time import Time
from matplotlib import rcParams
from matplotlib.artist import allow_rasterization
from matplotlib.axes import Axes as _Axes
from matplotlib.axes._base import _process_plot_var_args
from matplotlib.collections import PolyCollection
from matplotlib.lines import Line2D
from matplotlib.projections import register_projection
from . import (Plot, colorbar as gcbar)
from .colors import format_norm
from .gps import GPS_SCALES
from .legend import HandlerLine2D
from ..time import to_gps
__author__ = 'Duncan Macleod <[email protected]>'
def log_norm(func):
"""Wrap ``func`` to handle custom gwpy keywords for a LogNorm colouring
"""
@wraps(func)
def decorated_func(*args, **kwargs):
norm, kwargs = format_norm(kwargs)
kwargs['norm'] = norm
return func(*args, **kwargs)
return decorated_func
def xlim_as_gps(func):
"""Wrap ``func`` to handle pass limit inputs through `gwpy.time.to_gps`
"""
@wraps(func)
def wrapped_func(self, left=None, right=None, **kw):
if right is None and numpy.iterable(left):
left, right = left
kw['left'] = left
kw['right'] = right
gpsscale = self.get_xscale() in GPS_SCALES
for key in ('left', 'right'):
if gpsscale:
try:
kw[key] = numpy.longdouble(str(to_gps(kw[key])))
except TypeError:
pass
return func(self, **kw)
return wrapped_func
def restore_grid(func):
"""Wrap ``func`` to preserve the Axes current grid settings.
"""
@wraps(func)
def wrapped_func(self, *args, **kwargs):
try:
grid = (
self.xaxis._minor_tick_kw["gridOn"],
self.xaxis._major_tick_kw["gridOn"],
self.yaxis._minor_tick_kw["gridOn"],
self.yaxis._major_tick_kw["gridOn"],
)
except KeyError: # matplotlib < 3.3.3
grid = (self.xaxis._gridOnMinor, self.xaxis._gridOnMajor,
self.yaxis._gridOnMinor, self.yaxis._gridOnMajor)
try:
return func(self, *args, **kwargs)
finally:
# reset grid
self.xaxis.grid(grid[0], which="minor")
self.xaxis.grid(grid[1], which="major")
self.yaxis.grid(grid[2], which="minor")
self.yaxis.grid(grid[3], which="major")
return wrapped_func
# -- new Axes -----------------------------------------------------------------
class Axes(_Axes):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# handle Series in `ax.plot()`
self._get_lines = PlotArgsProcessor(self)
# reset data formatters (for interactive plots) to support
# GPS time display
self.fmt_xdata = self._fmt_xdata
self.fmt_ydata = self._fmt_ydata
@allow_rasterization
def draw(self, *args, **kwargs):
labels = {}
for ax in (self.xaxis, self.yaxis):
if ax.get_scale() in GPS_SCALES and ax.isDefault_label:
labels[ax] = ax.get_label_text()
trans = ax.get_transform()
epoch = float(trans.get_epoch())
unit = trans.get_unit_name()
iso = Time(epoch, format='gps', scale='utc').iso
utc = iso.rstrip('0').rstrip('.')
ax.set_label_text('Time [{0!s}] from {1!s} UTC ({2!r})'.format(
unit, utc, epoch))
try:
super().draw(*args, **kwargs)
finally:
for ax in labels: # reset labels
ax.isDefault_label = True
# -- auto-gps helpers -----------------------
def _fmt_xdata(self, x):
if self.get_xscale() in GPS_SCALES:
return str(to_gps(x))
return self.xaxis.get_major_formatter().format_data_short(x)
def _fmt_ydata(self, y):
if self.get_yscale() in GPS_SCALES:
return str(to_gps(y))
return self.yaxis.get_major_formatter().format_data_short(y)
set_xlim = xlim_as_gps(_Axes.set_xlim)
def set_epoch(self, epoch):
"""Set the epoch for the current GPS scale.
This method will fail if the current X-axis scale isn't one of
the GPS scales. See :ref:`gwpy-plot-gps` for more details.
Parameters
----------
epoch : `float`, `str`
GPS-compatible time or date object, anything parseable by
:func:`~gwpy.time.to_gps` is fine.
"""
scale = self.get_xscale()
return self.set_xscale(scale, epoch=epoch)
def get_epoch(self):
"""Return the epoch for the current GPS scale/
This method will fail if the current X-axis scale isn't one of
the GPS scales. See :ref:`gwpy-plot-gps` for more details.
"""
return self.get_xaxis().get_transform().get_epoch()
# -- overloaded plotting methods ------------
def scatter(self, x, y, s=None, c=None, **kwargs):
# This method overloads Axes.scatter to enable quick
# sorting of data by the 'colour' array before scatter
# plotting.
# if we weren't asked to sort, or don't have any colours, don't
sort = kwargs.pop("sortbycolor", False)
if not sort:
return super().scatter(x, y, s=s, c=c, **kwargs)
# try and sort the colour array by value
try:
sortidx = numpy.asanyarray(c, dtype=float).argsort()
except ValueError as exc:
exc.args = (
"Axes.scatter argument 'sortbycolor' can only be used "
"with a simple array of floats in the colour array 'c'",
)
raise
def _sort(arr):
if arr is None or isinstance(arr, Number):
return arr
return numpy.asarray(arr)[sortidx]
# apply the sorting to each data array, and scatter
x, y, c, s = map(_sort, (x, y, c, s))
return super().scatter(x, y, s=s, c=c, **kwargs)
scatter.__doc__ = _Axes.scatter.__doc__.replace(
'marker :',
'sortbycolor : `bool`, optional, default: False\n'
' Sort scatter points by `c` array value, if given.\n\n'
'marker :',
)
@log_norm
def imshow(self, array, *args, **kwargs):
"""Display an image, i.e. data on a 2D regular raster.
If ``array`` is a :class:`~gwpy.types.Array2D` (e.g. a
:class:`~gwpy.spectrogram.Spectrogram`), then the defaults are
_different_ to those in the upstream
:meth:`~matplotlib.axes.Axes.imshow` method. Namely, the defaults are
- ``origin='lower'`` (coordinates start in lower-left corner)
- ``aspect='auto'`` (pixels are not forced to be square)
- ``interpolation='none'`` (no image interpolation is used)
In all other usage, the defaults from the upstream matplotlib method
are unchanged.
Parameters
----------
array : array-like or PIL image
The image data.
*args, **kwargs
All arguments and keywords are passed to the inherited
:meth:`~matplotlib.axes.Axes.imshow` method.
See also
--------
matplotlib.axes.Axes.imshow
for details of the image rendering
"""
if hasattr(array, "yspan"): # Array2D
return self._imshow_array2d(array, *args, **kwargs)
image = super().imshow(array, *args, **kwargs)
self.autoscale(enable=None, axis='both', tight=None)
return image
def _imshow_array2d(self, array, origin='lower', interpolation='none',
aspect='auto', **kwargs):
"""Render an `~gwpy.types.Array2D` using `Axes.imshow`
"""
# NOTE: If you change the defaults for this method, please update
# the docstring for `imshow` above.
# calculate extent
extent = tuple(array.xspan) + tuple(array.yspan)
if self.get_xscale() == 'log' and extent[0] == 0.:
extent = (1e-300,) + extent[1:]
if self.get_yscale() == 'log' and extent[2] == 0.:
extent = extent[:2] + (1e-300,) + extent[3:]
kwargs.setdefault('extent', extent)
return self.imshow(array.value.T, origin=origin, aspect=aspect,
interpolation=interpolation, **kwargs)
@restore_grid
@log_norm
def pcolormesh(self, *args, **kwargs):
"""Create a pseudocolor plot with a non-regular rectangular grid.
When using GWpy, this method can be called with a single argument
that is an :class:`~gwpy.types.Array2D`, for which the ``X`` and ``Y``
coordinate arrays will be determined from the indexing.
In all other usage, all ``args`` and ``kwargs`` are passed directly
to :meth:`~matplotlib.axes.Axes.pcolormesh`.
Notes
-----
Unlike the upstream :meth:`matplotlib.axes.Axes.pcolormesh`,
this method respects the current grid settings.
See also
--------
matplotlib.axes.Axes.pcolormesh
"""
if len(args) == 1 and hasattr(args[0], "yindex"): # Array2D
return self._pcolormesh_array2d(*args, **kwargs)
return super().pcolormesh(*args, **kwargs)
def _pcolormesh_array2d(self, array, *args, **kwargs):
"""Render an `~gwpy.types.Array2D` using `Axes.pcolormesh`
"""
x = numpy.concatenate((array.xindex.value, array.xspan[-1:]))
y = numpy.concatenate((array.yindex.value, array.yspan[-1:]))
xcoord, ycoord = numpy.meshgrid(x, y, copy=False, sparse=True)
return self.pcolormesh(xcoord, ycoord, array.value.T, *args, **kwargs)
def hist(self, x, *args, **kwargs):
x = numpy.asarray(x)
# re-format weights as array if given as float
weights = kwargs.get('weights', None)
if isinstance(weights, Number):
kwargs['weights'] = numpy.ones_like(x) * weights
# calculate log-spaced bins on-the-fly
if (
kwargs.pop('logbins', False)
and not numpy.iterable(kwargs.get('bins', None))
):
nbins = kwargs.get('bins', None) or rcParams.get('hist.bins', 30)
# get range
hrange = kwargs.pop('range', None)
if hrange is None:
try:
hrange = numpy.min(x), numpy.max(x)
except ValueError as exc:
if str(exc).startswith('zero-size array'): # no data
exc.args = ('cannot generate log-spaced histogram '
'bins for zero-size array, '
'please pass `bins` or `range` manually',)
raise
# log-scale the axis and extract the base
if kwargs.get('orientation') == 'horizontal':
self.set_yscale('log', nonpositive='clip')
logbase = self.yaxis._scale.base
else:
self.set_xscale('log', nonpositive='clip')
logbase = self.xaxis._scale.base
# generate the bins
kwargs['bins'] = numpy.logspace(
log(hrange[0], logbase), log(hrange[1], logbase),
nbins+1, endpoint=True)
return super().hist(x, *args, **kwargs)
hist.__doc__ = _Axes.hist.__doc__.replace(
'color :',
'logbins : boolean, optional\n'
' If ``True``, use logarithmically-spaced histogram bins.\n\n'
' Default is ``False``\n\n'
'color :')
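    # Illustrative call of the ``logbins`` handling above (exposition only;
    # ``samples`` is assumed to be a 1-D array of positive values):
    #
    #     ax.hist(samples, logbins=True, bins=40, histtype='stepfilled')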
# -- new plotting methods -------------------
def plot_mmm(self, data, lower=None, upper=None, **kwargs):
"""Plot a `Series` as a line, with a shaded region around it.
The ``data`` `Series` is drawn, while the ``lower`` and ``upper``
`Series` are plotted lightly below and above, with a fill
between them and the ``data``.
All three `Series` should have the same `~Series.index` array.
Parameters
----------
data : `~gwpy.types.Series`
Data to plot normally.
lower : `~gwpy.types.Series`
Lower boundary (on Y-axis) for shade.
upper : `~gwpy.types.Series`
Upper boundary (on Y-axis) for shade.
**kwargs
Any other keyword arguments acceptable for
:meth:`~matplotlib.Axes.plot`.
Returns
-------
artists : `tuple`
All of the drawn artists:
            - `~matplotlib.lines.Line2D` for ``data``,
            - `~matplotlib.lines.Line2D` for ``lower``, if given
            - `~matplotlib.lines.Line2D` for ``upper``, if given
            - `~matplotlib.collections.PolyCollection` for shading
See also
--------
matplotlib.axes.Axes.plot
for a full description of acceptable ``*args`` and ``**kwargs``
"""
alpha = kwargs.pop('alpha', .1)
# plot mean
line, = self.plot(data, **kwargs)
out = [line]
# modify keywords for shading
kwargs.update({
'label': '',
'linewidth': line.get_linewidth() / 2,
'color': line.get_color(),
'alpha': alpha * 2,
})
# plot lower and upper Series
fill = [data.xindex.value, data.value, data.value]
for i, bound in enumerate((lower, upper)):
if bound is not None:
out.extend(self.plot(bound, **kwargs))
fill[i+1] = bound.value
# fill between
out.append(self.fill_between(
*fill, alpha=alpha, color=kwargs['color'],
rasterized=kwargs.get('rasterized', True)))
return out
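    # Illustrative plot_mmm() call (exposition only; ``ax`` is assumed to be
    # an instance of this class, and ``med``, ``lo``, ``hi`` pre-computed
    # gwpy Series sharing one index array):
    #
    #     line, lo_line, hi_line, shade = ax.plot_mmm(med, lower=lo, upper=hi)
    #     ax.set_xlim(med.xspan)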
def tile(self, x, y, w, h, color=None,
anchor='center', edgecolors='face', linewidth=0.8,
**kwargs):
"""Plot rectanguler tiles based onto these `Axes`.
``x`` and ``y`` give the anchor point for each tile, with
``w`` and ``h`` giving the extent in the X and Y axis respectively.
Parameters
----------
x, y, w, h : `array_like`, shape (n, )
Input data
color : `array_like`, shape (n, )
Array of amplitudes for tile color
anchor : `str`, optional
Anchor point for tiles relative to ``(x, y)`` coordinates, one of
- ``'center'`` - center tile on ``(x, y)``
- ``'ll'`` - ``(x, y)`` defines lower-left corner of tile
- ``'lr'`` - ``(x, y)`` defines lower-right corner of tile
- ``'ul'`` - ``(x, y)`` defines upper-left corner of tile
- ``'ur'`` - ``(x, y)`` defines upper-right corner of tile
**kwargs
Other keywords are passed to
:meth:`~matplotlib.collections.PolyCollection`
Returns
-------
collection : `~matplotlib.collections.PolyCollection`
the collection of tiles drawn
Examples
--------
>>> import numpy
>>> from matplotlib import pyplot
>>> import gwpy.plot # to get gwpy's Axes
>>> x = numpy.arange(10)
>>> y = numpy.arange(x.size)
>>> w = numpy.ones_like(x) * .8
>>> h = numpy.ones_like(x) * .8
>>> fig = pyplot.figure()
>>> ax = fig.gca()
>>> ax.tile(x, y, w, h, anchor='ll')
>>> pyplot.show()
"""
        # get color and sort (pop the gwpy-only keyword so it is not passed on)
        c_sort = kwargs.pop('c_sort', True)
        if color is not None and c_sort:
sortidx = color.argsort()
x = x[sortidx]
y = y[sortidx]
w = w[sortidx]
h = h[sortidx]
color = color[sortidx]
# define how to make a polygon for each tile
if anchor == 'll':
def _poly(x, y, w, h):
return ((x, y), (x, y+h), (x+w, y+h), (x+w, y))
elif anchor == 'lr':
def _poly(x, y, w, h):
return ((x-w, y), (x-w, y+h), (x, y+h), (x, y))
elif anchor == 'ul':
def _poly(x, y, w, h):
return ((x, y-h), (x, y), (x+w, y), (x+w, y-h))
elif anchor == 'ur':
def _poly(x, y, w, h):
return ((x-w, y-h), (x-w, y), (x, y), (x, y-h))
elif anchor == 'center':
def _poly(x, y, w, h):
return ((x-w/2., y-h/2.), (x-w/2., y+h/2.),
(x+w/2., y+h/2.), (x+w/2., y-h/2.))
else:
raise ValueError("Unrecognised tile anchor {!r}".format(anchor))
# build collection
cmap = kwargs.pop('cmap', rcParams['image.cmap'])
coll = PolyCollection((_poly(*tile) for tile in zip(x, y, w, h)),
edgecolors=edgecolors, linewidth=linewidth,
**kwargs)
if color is not None:
coll.set_array(color)
coll.set_cmap(cmap)
out = self.add_collection(coll)
self.autoscale_view()
return out
# -- overloaded auxiliary methods -----------
def legend(self, *args, **kwargs):
# handle deprecated keywords
linewidth = kwargs.pop("linewidth", None)
if linewidth:
warnings.warn(
"the linewidth keyword to gwpy.plot.Axes.legend has been "
"deprecated and will be removed in a future release; "
"please update your code to use a custom legend handler, "
"e.g. gwpy.plot.legend.HandlerLine2D.",
DeprecationWarning,
)
alpha = kwargs.pop("alpha", None)
if alpha:
kwargs.setdefault("framealpha", alpha)
warnings.warn(
"the alpha keyword to gwpy.plot.Axes.legend has been "
"deprecated and will be removed in a future release; "
"use framealpha instead.",
DeprecationWarning,
)
# build custom handler
handler_map = kwargs.setdefault("handler_map", dict())
if isinstance(handler_map, dict):
handler_map.setdefault(Line2D, HandlerLine2D(linewidth or 6))
# create legend
return super().legend(*args, **kwargs)
legend.__doc__ = _Axes.legend.__doc__.replace(
"Call signatures",
""".. note::
This method uses a custom default legend handler for
`~matplotlib.lines.Line2D` objects, with increased linewidth relative
to the upstream :meth:`~matplotlib.axes.Axes.legend` method.
To disable this, pass ``handler_map=None``, or create and pass your
own handler class. See :ref:`gwpy-plot-legend` for more details.
Call signatures""",
)
def colorbar(self, mappable=None, **kwargs):
"""Add a `~matplotlib.colorbar.Colorbar` to these `Axes`
Parameters
----------
mappable : matplotlib data collection, optional
collection against which to map the colouring, default will
be the last added mappable artist (collection or image)
fraction : `float`, optional
fraction of space to steal from these `Axes` to make space
for the new axes, default is ``0.`` if ``use_axesgrid=True``
is given (default), otherwise default is ``.15`` to match
the upstream matplotlib default.
**kwargs
other keyword arguments to be passed to the
:meth:`Plot.colorbar` generator
Returns
-------
cbar : `~matplotlib.colorbar.Colorbar`
the newly added `Colorbar`
See also
--------
Plot.colorbar
"""
fig = self.get_figure()
if kwargs.get('use_axesgrid', True):
kwargs.setdefault('fraction', 0.)
if kwargs.get('fraction', 0.) == 0.:
kwargs.setdefault('use_axesgrid', True)
mappable, kwargs = gcbar.process_colorbar_kwargs(
fig, mappable=mappable, ax=self, **kwargs)
if isinstance(fig, Plot):
# either we have created colorbar Axes using axesgrid1, or
# the user already gave use_axesgrid=False, so we forcefully
# disable axesgrid here in case fraction == 0., which causes
# gridspec colorbars to fail.
kwargs['use_axesgrid'] = False
return fig.colorbar(mappable, **kwargs)
# override default Axes with this one by registering a projection with the
# same name
register_projection(Axes)
# -- overload Axes.plot() to handle Series ------------------------------------
class PlotArgsProcessor(_process_plot_var_args):
"""This class controls how ax.plot() works
"""
def __call__(self, *args, **kwargs):
"""Find `Series` data in `plot()` args and unwrap
"""
newargs = []
while args:
# strip first argument
this, args = args[:1], args[1:]
# it its a 1-D Series, then parse it as (xindex, value)
if hasattr(this[0], "xindex") and this[0].ndim == 1:
this = (this[0].xindex.value, this[0].value)
# otherwise treat as normal (must be a second argument)
else:
this += args[:1]
args = args[1:]
# allow colour specs
if args and isinstance(args[0], str):
this += args[0],
args = args[1:]
newargs.extend(this)
return super().__call__(*newargs, **kwargs)
| gpl-3.0 |
SickGear/SickGear | lib/dateutil/parser/_parser.py | 2 | 60702 | # -*- coding: utf-8 -*-
"""
This module offers a generic date/time string parser which is able to parse
most known formats to represent a date and/or time.
This module attempts to be forgiving with regards to unlikely input formats,
returning a datetime object even for dates which are ambiguous. If an element
of a date/time stamp is omitted, the following rules are applied:
- If AM or PM is left unspecified, a 24-hour clock is assumed; however, an hour
on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is
specified.
- If a time zone is omitted, a timezone-naive datetime is returned.
If any other elements are missing, they are taken from the
:class:`datetime.datetime` object passed to the parameter ``default``. If this
results in a day number exceeding the valid number of days per month, the
value falls back to the end of the month.
Additional resources about date/time string formats can be found below:
- `A summary of the international standard date and time notation
<https://www.cl.cam.ac.uk/~mgk25/iso-time.html>`_
- `W3C Date and Time Formats <https://www.w3.org/TR/NOTE-datetime>`_
- `Time Formats (Planetary Rings Node) <https://pds-rings.seti.org:443/tools/time_formats.html>`_
- `CPAN ParseDate module
<https://metacpan.org/pod/release/MUIR/Time-modules-2013.0912/lib/Time/ParseDate.pm>`_
- `Java SimpleDateFormat Class
<https://docs.oracle.com/javase/6/docs/api/java/text/SimpleDateFormat.html>`_
"""
from __future__ import unicode_literals
import datetime
import re
import string
import time
import warnings
from calendar import monthrange
from io import StringIO
import six
from six import integer_types, text_type
from decimal import Decimal
from warnings import warn
from .. import relativedelta
from .. import tz
__all__ = ["parse", "parserinfo", "ParserError"]
# TODO: pandas.core.tools.datetimes imports this explicitly. Might be worth
# making public and/or figuring out if there is something we can
# take off their plate.
class _timelex(object):
# Fractional seconds are sometimes split by a comma
_split_decimal = re.compile("([.,])")
def __init__(self, instream):
if six.PY2:
# In Python 2, we can't duck type properly because unicode has
# a 'decode' function, and we'd be double-decoding
if isinstance(instream, (bytes, bytearray)):
instream = instream.decode()
else:
if getattr(instream, 'decode', None) is not None:
instream = instream.decode()
if isinstance(instream, text_type):
instream = StringIO(instream)
elif getattr(instream, 'read', None) is None:
raise TypeError('Parser must be a string or character stream, not '
'{itype}'.format(itype=instream.__class__.__name__))
self.instream = instream
self.charstack = []
self.tokenstack = []
self.eof = False
def get_token(self):
"""
This function breaks the time string into lexical units (tokens), which
can be parsed by the parser. Lexical units are demarcated by changes in
the character set, so any continuous string of letters is considered
one unit, any continuous string of numbers is considered one unit.
        The main complication arises from the fact that dots ('.') can be used
        both as separators (e.g. "Sep.20.2009") and as decimal points (e.g.
        "4:30:21.447"). As such, it is necessary to read the full context of
        any dot-separated strings before breaking them into tokens; to that
        end, this function maintains a "token stack", for when the ambiguous
        context demands that multiple tokens be parsed at once.
"""
if self.tokenstack:
return self.tokenstack.pop(0)
seenletters = False
token = None
state = None
while not self.eof:
# We only realize that we've reached the end of a token when we
# find a character that's not part of the current token - since
# that character may be part of the next token, it's stored in the
# charstack.
if self.charstack:
nextchar = self.charstack.pop(0)
else:
nextchar = self.instream.read(1)
while nextchar == '\x00':
nextchar = self.instream.read(1)
if not nextchar:
self.eof = True
break
elif not state:
# First character of the token - determines if we're starting
# to parse a word, a number or something else.
token = nextchar
if self.isword(nextchar):
state = 'a'
elif self.isnum(nextchar):
state = '0'
elif self.isspace(nextchar):
token = ' '
break # emit token
else:
break # emit token
elif state == 'a':
# If we've already started reading a word, we keep reading
# letters until we find something that's not part of a word.
seenletters = True
if self.isword(nextchar):
token += nextchar
elif nextchar == '.':
token += nextchar
state = 'a.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == '0':
# If we've already started reading a number, we keep reading
# numbers until we find something that doesn't fit.
if self.isnum(nextchar):
token += nextchar
elif nextchar == '.' or (nextchar == ',' and len(token) >= 2):
token += nextchar
state = '0.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == 'a.':
# If we've seen some letters and a dot separator, continue
# parsing, and the tokens will be broken up later.
seenletters = True
if nextchar == '.' or self.isword(nextchar):
token += nextchar
elif self.isnum(nextchar) and token[-1] == '.':
token += nextchar
state = '0.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == '0.':
# If we've seen at least one dot separator, keep going, we'll
# break up the tokens later.
if nextchar == '.' or self.isnum(nextchar):
token += nextchar
elif self.isword(nextchar) and token[-1] == '.':
token += nextchar
state = 'a.'
else:
self.charstack.append(nextchar)
break # emit token
if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or
token[-1] in '.,')):
l = self._split_decimal.split(token)
token = l[0]
for tok in l[1:]:
if tok:
self.tokenstack.append(tok)
if state == '0.' and token.count('.') == 0:
token = token.replace(',', '.')
return token
def __iter__(self):
return self
def __next__(self):
token = self.get_token()
if token is None:
raise StopIteration
return token
def next(self):
return self.__next__() # Python 2.x support
@classmethod
def split(cls, s):
return list(cls(s))
@classmethod
def isword(cls, nextchar):
""" Whether or not the next character is part of a word """
return nextchar.isalpha()
@classmethod
def isnum(cls, nextchar):
""" Whether the next character is part of a number """
return nextchar.isdigit()
@classmethod
def isspace(cls, nextchar):
""" Whether the next character is whitespace """
return nextchar.isspace()
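# Illustrative tokenization following the rules described in get_token()
# (exposition only; the exact split is an assumption, not a doctest):
#
#     >>> _timelex.split("Sep.20.2009 4:30:21.447")
#     ['Sep', '.', '20', '.', '2009', ' ', '4', ':', '30', ':', '21.447']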
class _resultbase(object):
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def _repr(self, classname):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (classname, ", ".join(l))
def __len__(self):
return (sum(getattr(self, attr) is not None
for attr in self.__slots__))
def __repr__(self):
return self._repr(self.__class__.__name__)
class parserinfo(object):
"""
Class which handles what inputs are accepted. Subclass this to customize
the language and acceptable values for each parameter.
:param dayfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the day (``True``) or month (``False``). If
``yearfirst`` is set to ``True``, this distinguishes between YDM
and YMD. Default is ``False``.
:param yearfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the year. If ``True``, the first number is taken
to be the year, otherwise the last number is taken to be the year.
Default is ``False``.
"""
# m from a.m/p.m, t from ISO T separator
JUMP = [" ", ".", ",", ";", "-", "/", "'",
"at", "on", "and", "ad", "m", "t", "of",
"st", "nd", "rd", "th"]
WEEKDAYS = [("Mon", "Monday"),
("Tue", "Tuesday"), # TODO: "Tues"
("Wed", "Wednesday"),
("Thu", "Thursday"), # TODO: "Thurs"
("Fri", "Friday"),
("Sat", "Saturday"),
("Sun", "Sunday")]
MONTHS = [("Jan", "January"),
("Feb", "February"), # TODO: "Febr"
("Mar", "March"),
("Apr", "April"),
("May", "May"),
("Jun", "June"),
("Jul", "July"),
("Aug", "August"),
("Sep", "Sept", "September"),
("Oct", "October"),
("Nov", "November"),
("Dec", "December")]
HMS = [("h", "hour", "hours"),
("m", "minute", "minutes"),
("s", "second", "seconds")]
AMPM = [("am", "a"),
("pm", "p")]
UTCZONE = ["UTC", "GMT", "Z", "z"]
PERTAIN = ["of"]
TZOFFSET = {}
# TODO: ERA = ["AD", "BC", "CE", "BCE", "Stardate",
# "Anno Domini", "Year of Our Lord"]
def __init__(self, dayfirst=False, yearfirst=False):
self._jump = self._convert(self.JUMP)
self._weekdays = self._convert(self.WEEKDAYS)
self._months = self._convert(self.MONTHS)
self._hms = self._convert(self.HMS)
self._ampm = self._convert(self.AMPM)
self._utczone = self._convert(self.UTCZONE)
self._pertain = self._convert(self.PERTAIN)
self.dayfirst = dayfirst
self.yearfirst = yearfirst
self._year = time.localtime().tm_year
self._century = self._year // 100 * 100
def _convert(self, lst):
dct = {}
for i, v in enumerate(lst):
if isinstance(v, tuple):
for v in v:
dct[v.lower()] = i
else:
dct[v.lower()] = i
return dct
def jump(self, name):
return name.lower() in self._jump
def weekday(self, name):
try:
return self._weekdays[name.lower()]
except KeyError:
pass
return None
def month(self, name):
try:
return self._months[name.lower()] + 1
except KeyError:
pass
return None
def hms(self, name):
try:
return self._hms[name.lower()]
except KeyError:
return None
def ampm(self, name):
try:
return self._ampm[name.lower()]
except KeyError:
return None
def pertain(self, name):
return name.lower() in self._pertain
def utczone(self, name):
return name.lower() in self._utczone
def tzoffset(self, name):
if name in self._utczone:
return 0
return self.TZOFFSET.get(name)
def convertyear(self, year, century_specified=False):
"""
Converts two-digit years to year within [-50, 49]
range of self._year (current local time)
"""
# Function contract is that the year is always positive
assert year >= 0
if year < 100 and not century_specified:
# assume current century to start
year += self._century
if year >= self._year + 50: # if too far in future
year -= 100
elif year < self._year - 50: # if too far in past
year += 100
return year
def validate(self, res):
# move to info
if res.year is not None:
res.year = self.convertyear(res.year, res.century_specified)
if ((res.tzoffset == 0 and not res.tzname) or
(res.tzname == 'Z' or res.tzname == 'z')):
res.tzname = "UTC"
res.tzoffset = 0
elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
res.tzoffset = 0
return True
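# --- Editorial example (not part of the original dateutil source). The
# parserinfo docstring above describes subclassing to customize the accepted
# vocabulary; the extra jump word and the "BRST" offset below are illustrative
# assumptions, not values shipped with dateutil.
class _ExampleParserInfo(parserinfo):  # pragma: no cover - illustrative only
    # Extra filler words the parser should skip over.
    JUMP = parserinfo.JUMP + ["approximately"]
    # Map a timezone abbreviation to a fixed offset in seconds.
    TZOFFSET = {"BRST": -2 * 3600}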
class _ymd(list):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.century_specified = False
self.dstridx = None
self.mstridx = None
self.ystridx = None
@property
def has_year(self):
return self.ystridx is not None
@property
def has_month(self):
return self.mstridx is not None
@property
def has_day(self):
return self.dstridx is not None
def could_be_day(self, value):
if self.has_day:
return False
elif not self.has_month:
return 1 <= value <= 31
elif not self.has_year:
# Be permissive, assume leap year
month = self[self.mstridx]
return 1 <= value <= monthrange(2000, month)[1]
else:
month = self[self.mstridx]
year = self[self.ystridx]
return 1 <= value <= monthrange(year, month)[1]
def append(self, val, label=None):
if hasattr(val, '__len__'):
if val.isdigit() and len(val) > 2:
self.century_specified = True
if label not in [None, 'Y']: # pragma: no cover
raise ValueError(label)
label = 'Y'
elif val > 100:
self.century_specified = True
if label not in [None, 'Y']: # pragma: no cover
raise ValueError(label)
label = 'Y'
super(self.__class__, self).append(int(val))
if label == 'M':
if self.has_month:
raise ValueError('Month is already set')
self.mstridx = len(self) - 1
elif label == 'D':
if self.has_day:
raise ValueError('Day is already set')
self.dstridx = len(self) - 1
elif label == 'Y':
if self.has_year:
raise ValueError('Year is already set')
self.ystridx = len(self) - 1
def _resolve_from_stridxs(self, strids):
"""
Try to resolve the identities of year/month/day elements using
ystridx, mstridx, and dstridx, if enough of these are specified.
"""
if len(self) == 3 and len(strids) == 2:
# we can back out the remaining stridx value
missing = [x for x in range(3) if x not in strids.values()]
key = [x for x in ['y', 'm', 'd'] if x not in strids]
assert len(missing) == len(key) == 1
key = key[0]
val = missing[0]
strids[key] = val
assert len(self) == len(strids) # otherwise this should not be called
out = {key: self[strids[key]] for key in strids}
return (out.get('y'), out.get('m'), out.get('d'))
def resolve_ymd(self, yearfirst, dayfirst):
len_ymd = len(self)
year, month, day = (None, None, None)
strids = (('y', self.ystridx),
('m', self.mstridx),
('d', self.dstridx))
strids = {key: val for key, val in strids if val is not None}
if (len(self) == len(strids) > 0 or
(len(self) == 3 and len(strids) == 2)):
return self._resolve_from_stridxs(strids)
mstridx = self.mstridx
if len_ymd > 3:
raise ValueError("More than three YMD values")
elif len_ymd == 1 or (mstridx is not None and len_ymd == 2):
# One member, or two members with a month string
if mstridx is not None:
month = self[mstridx]
# since mstridx is 0 or 1, self[mstridx-1] always
# looks up the other element
other = self[mstridx - 1]
else:
other = self[0]
if len_ymd > 1 or mstridx is None:
if other > 31:
year = other
else:
day = other
elif len_ymd == 2:
# Two members with numbers
if self[0] > 31:
# 99-01
year, month = self
elif self[1] > 31:
# 01-99
month, year = self
elif dayfirst and self[1] <= 12:
# 13-01
day, month = self
else:
# 01-13
month, day = self
elif len_ymd == 3:
# Three members
if mstridx == 0:
if self[1] > 31:
# Apr-2003-25
month, year, day = self
else:
month, day, year = self
elif mstridx == 1:
if self[0] > 31 or (yearfirst and self[2] <= 31):
# 99-Jan-01
year, month, day = self
else:
# 01-Jan-01
# Give precedence to day-first, since
# two-digit years are usually hand-written.
day, month, year = self
elif mstridx == 2:
# Month in the last position (unusual ordering)
if self[1] > 31:
# 01-99-Jan
day, year, month = self
else:
# 99-01-Jan
year, day, month = self
else:
if (self[0] > 31 or
self.ystridx == 0 or
(yearfirst and self[1] <= 12 and self[2] <= 31)):
# 99-01-01
if dayfirst and self[2] <= 12:
year, day, month = self
else:
year, month, day = self
elif self[0] > 12 or (dayfirst and self[1] <= 12):
# 13-01-01
day, month, year = self
else:
# 01-13-01
month, day, year = self
return year, month, day
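# --- Editorial example (not part of the original dateutil source). A small
# sketch of how the _ymd helper above resolves an ambiguous group of two-digit
# values; the literals are assumptions chosen for illustration.
def _ymd_example():  # pragma: no cover - illustrative only
    ymd = _ymd()
    for token in ("01", "05", "09"):
        ymd.append(token)
    # resolve_ymd returns a (year, month, day) triple; two-digit years are
    # normalized later by parserinfo.convertyear via validate().
    default_order = ymd.resolve_ymd(yearfirst=False, dayfirst=False)
    # With dayfirst=True the first of the two ambiguous leading values is
    # taken as the day rather than the month.
    dayfirst_order = ymd.resolve_ymd(yearfirst=False, dayfirst=True)
    return default_order, dayfirst_order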
class parser(object):
def __init__(self, info=None):
self.info = info or parserinfo()
def parse(self, timestr, default=None,
ignoretz=False, tzinfos=None, **kwargs):
"""
Parse the date/time string into a :class:`datetime.datetime` object.
:param timestr:
Any date/time string using the supported formats.
:param default:
The default datetime object; if this is a datetime object and not
``None``, elements specified in ``timestr`` replace elements in the
default object.
:param ignoretz:
If set ``True``, time zones in parsed strings are ignored and a
naive :class:`datetime.datetime` object is returned.
:param tzinfos:
Additional time zone names / aliases which may be present in the
string. This argument maps time zone names (and optionally offsets
from those time zones) to time zones. This parameter can be a
dictionary with timezone aliases mapping time zone names to time
zones or a function taking two parameters (``tzname`` and
``tzoffset``) and returning a time zone.
The timezones to which the names are mapped can be an integer
offset from UTC in seconds or a :class:`tzinfo` object.
.. doctest::
:options: +NORMALIZE_WHITESPACE
>>> from dateutil.parser import parse
>>> from dateutil.tz import gettz
>>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")}
>>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200))
>>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
datetime.datetime(2012, 1, 19, 17, 21,
tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
This parameter is ignored if ``ignoretz`` is set.
:param \\*\\*kwargs:
Keyword arguments as passed to ``_parse()``.
:return:
Returns a :class:`datetime.datetime` object or, if the
``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
first element being a :class:`datetime.datetime` object, the second
a tuple containing the fuzzy tokens.
:raises ParserError:
Raised for invalid or unknown string format, if the provided
:class:`tzinfo` is not in a valid format, or if an invalid date
would be created.
:raises TypeError:
Raised for non-string or character stream input.
:raises OverflowError:
Raised if the parsed date exceeds the largest valid C integer on
your system.
"""
if default is None:
default = datetime.datetime.now().replace(hour=0, minute=0,
second=0, microsecond=0)
res, skipped_tokens = self._parse(timestr, **kwargs)
if res is None:
raise ParserError("Unknown string format: %s", timestr)
if len(res) == 0:
raise ParserError("String does not contain a date: %s", timestr)
try:
ret = self._build_naive(res, default)
except ValueError as e:
six.raise_from(ParserError(str(e) + ": %s", timestr), e)
if not ignoretz:
ret = self._build_tzaware(ret, res, tzinfos)
if kwargs.get('fuzzy_with_tokens', False):
return ret, skipped_tokens
else:
return ret
class _result(_resultbase):
__slots__ = ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond",
"tzname", "tzoffset", "ampm","any_unused_tokens"]
def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
fuzzy_with_tokens=False):
"""
Private method which performs the heavy lifting of parsing, called from
``parse()``, which passes on its ``kwargs`` to this function.
:param timestr:
The string to parse.
:param dayfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the day (``True``) or month (``False``). If
``yearfirst`` is set to ``True``, this distinguishes between YDM
and YMD. If set to ``None``, this value is retrieved from the
current :class:`parserinfo` object (which itself defaults to
``False``).
:param yearfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the year. If ``True``, the first number is taken
to be the year, otherwise the last number is taken to be the year.
If this is set to ``None``, the value is retrieved from the current
:class:`parserinfo` object (which itself defaults to ``False``).
:param fuzzy:
Whether to allow fuzzy parsing, allowing for strings like "Today is
January 1, 2047 at 8:21:00AM".
:param fuzzy_with_tokens:
If ``True``, ``fuzzy`` is automatically set to True, and the parser
will return a tuple where the first element is the parsed
:class:`datetime.datetime` datetimestamp and the second element is
a tuple containing the portions of the string which were ignored:
.. doctest::
>>> from dateutil.parser import parse
>>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
(datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
"""
if fuzzy_with_tokens:
fuzzy = True
info = self.info
if dayfirst is None:
dayfirst = info.dayfirst
if yearfirst is None:
yearfirst = info.yearfirst
res = self._result()
l = _timelex.split(timestr) # Splits the timestr into tokens
skipped_idxs = []
# year/month/day list
ymd = _ymd()
len_l = len(l)
i = 0
try:
while i < len_l:
# Check if it's a number
value_repr = l[i]
try:
value = float(value_repr)
except ValueError:
value = None
if value is not None:
# Numeric token
i = self._parse_numeric_token(l, i, info, ymd, res, fuzzy)
# Check weekday
elif info.weekday(l[i]) is not None:
value = info.weekday(l[i])
res.weekday = value
# Check month name
elif info.month(l[i]) is not None:
value = info.month(l[i])
ymd.append(value, 'M')
if i + 1 < len_l:
if l[i + 1] in ('-', '/'):
# Jan-01[-99]
sep = l[i + 1]
ymd.append(l[i + 2])
if i + 3 < len_l and l[i + 3] == sep:
# Jan-01-99
ymd.append(l[i + 4])
i += 2
i += 2
elif (i + 4 < len_l and l[i + 1] == l[i + 3] == ' ' and
info.pertain(l[i + 2])):
# Jan of 01
# In this case, 01 is clearly year
if l[i + 4].isdigit():
# Convert it here to become unambiguous
value = int(l[i + 4])
year = str(info.convertyear(value))
ymd.append(year, 'Y')
else:
# Wrong guess
pass
# TODO: not hit in tests
i += 4
# Check am/pm
elif info.ampm(l[i]) is not None:
value = info.ampm(l[i])
val_is_ampm = self._ampm_valid(res.hour, res.ampm, fuzzy)
if val_is_ampm:
res.hour = self._adjust_ampm(res.hour, value)
res.ampm = value
elif fuzzy:
skipped_idxs.append(i)
# Check for a timezone name
elif self._could_be_tzname(res.hour, res.tzname, res.tzoffset, l[i]):
res.tzname = l[i]
res.tzoffset = info.tzoffset(res.tzname)
# Check for something like GMT+3, or BRST+3. Notice
# that it doesn't mean "I am 3 hours after GMT", but
# "my time +3 is GMT". If found, we reverse the
# logic so that timezone parsing code will get it
# right.
if i + 1 < len_l and l[i + 1] in ('+', '-'):
l[i + 1] = ('+', '-')[l[i + 1] == '+']
res.tzoffset = None
if info.utczone(res.tzname):
# With something like GMT+3, the timezone
# is *not* GMT.
res.tzname = None
# Check for a numbered timezone
elif res.hour is not None and l[i] in ('+', '-'):
signal = (-1, 1)[l[i] == '+']
len_li = len(l[i + 1])
# TODO: check that l[i + 1] is integer?
if len_li == 4:
# -0300
hour_offset = int(l[i + 1][:2])
min_offset = int(l[i + 1][2:])
elif i + 2 < len_l and l[i + 2] == ':':
# -03:00
hour_offset = int(l[i + 1])
min_offset = int(l[i + 3]) # TODO: Check that l[i+3] is minute-like?
i += 2
elif len_li <= 2:
# -[0]3
hour_offset = int(l[i + 1][:2])
min_offset = 0
else:
raise ValueError(timestr)
res.tzoffset = signal * (hour_offset * 3600 + min_offset * 60)
# Look for a timezone name between parenthesis
if (i + 5 < len_l and
info.jump(l[i + 2]) and l[i + 3] == '(' and
l[i + 5] == ')' and
3 <= len(l[i + 4]) and
self._could_be_tzname(res.hour, res.tzname,
None, l[i + 4])):
# -0300 (BRST)
res.tzname = l[i + 4]
i += 4
i += 1
# Check jumps
elif not (info.jump(l[i]) or fuzzy):
raise ValueError(timestr)
else:
skipped_idxs.append(i)
i += 1
# Process year/month/day
year, month, day = ymd.resolve_ymd(yearfirst, dayfirst)
res.century_specified = ymd.century_specified
res.year = year
res.month = month
res.day = day
except (IndexError, ValueError):
return None, None
if not info.validate(res):
return None, None
if fuzzy_with_tokens:
skipped_tokens = self._recombine_skipped(l, skipped_idxs)
return res, tuple(skipped_tokens)
else:
return res, None
def _parse_numeric_token(self, tokens, idx, info, ymd, res, fuzzy):
# Token is a number
value_repr = tokens[idx]
try:
value = self._to_decimal(value_repr)
except Exception as e:
six.raise_from(ValueError('Unknown numeric token'), e)
len_li = len(value_repr)
len_l = len(tokens)
if (len(ymd) == 3 and len_li in (2, 4) and
res.hour is None and
(idx + 1 >= len_l or
(tokens[idx + 1] != ':' and
info.hms(tokens[idx + 1]) is None))):
# 19990101T23[59]
s = tokens[idx]
res.hour = int(s[:2])
if len_li == 4:
res.minute = int(s[2:])
elif len_li == 6 or (len_li > 6 and tokens[idx].find('.') == 6):
# YYMMDD or HHMMSS[.ss]
s = tokens[idx]
if not ymd and '.' not in tokens[idx]:
ymd.append(s[:2])
ymd.append(s[2:4])
ymd.append(s[4:])
else:
# 19990101T235959[.59]
# TODO: Check if res attributes already set.
res.hour = int(s[:2])
res.minute = int(s[2:4])
res.second, res.microsecond = self._parsems(s[4:])
elif len_li in (8, 12, 14):
# YYYYMMDD
s = tokens[idx]
ymd.append(s[:4], 'Y')
ymd.append(s[4:6])
ymd.append(s[6:8])
if len_li > 8:
res.hour = int(s[8:10])
res.minute = int(s[10:12])
if len_li > 12:
res.second = int(s[12:])
elif self._find_hms_idx(idx, tokens, info, allow_jump=True) is not None:
# HH[ ]h or MM[ ]m or SS[.ss][ ]s
hms_idx = self._find_hms_idx(idx, tokens, info, allow_jump=True)
(idx, hms) = self._parse_hms(idx, tokens, info, hms_idx)
if hms is not None:
# TODO: checking that hour/minute/second are not
# already set?
self._assign_hms(res, value_repr, hms)
elif idx + 2 < len_l and tokens[idx + 1] == ':':
# HH:MM[:SS[.ss]]
res.hour = int(value)
value = self._to_decimal(tokens[idx + 2]) # TODO: try/except for this?
(res.minute, res.second) = self._parse_min_sec(value)
if idx + 4 < len_l and tokens[idx + 3] == ':':
res.second, res.microsecond = self._parsems(tokens[idx + 4])
idx += 2
idx += 2
elif idx + 1 < len_l and tokens[idx + 1] in ('-', '/', '.'):
sep = tokens[idx + 1]
ymd.append(value_repr)
if idx + 2 < len_l and not info.jump(tokens[idx + 2]):
if tokens[idx + 2].isdigit():
# 01-01[-01]
ymd.append(tokens[idx + 2])
else:
# 01-Jan[-01]
value = info.month(tokens[idx + 2])
if value is not None:
ymd.append(value, 'M')
else:
raise ValueError()
if idx + 3 < len_l and tokens[idx + 3] == sep:
# We have three members
value = info.month(tokens[idx + 4])
if value is not None:
ymd.append(value, 'M')
else:
ymd.append(tokens[idx + 4])
idx += 2
idx += 1
idx += 1
elif idx + 1 >= len_l or info.jump(tokens[idx + 1]):
if idx + 2 < len_l and info.ampm(tokens[idx + 2]) is not None:
# 12 am
hour = int(value)
res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 2]))
idx += 1
else:
# Year, month or day
ymd.append(value)
idx += 1
elif info.ampm(tokens[idx + 1]) is not None and (0 <= value < 24):
# 12am
hour = int(value)
res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 1]))
idx += 1
elif ymd.could_be_day(value):
ymd.append(value)
elif not fuzzy:
raise ValueError()
return idx
def _find_hms_idx(self, idx, tokens, info, allow_jump):
len_l = len(tokens)
if idx+1 < len_l and info.hms(tokens[idx+1]) is not None:
# There is an "h", "m", or "s" label following this token. We take
# assign the upcoming label to the current token.
# e.g. the "12" in 12h"
hms_idx = idx + 1
elif (allow_jump and idx+2 < len_l and tokens[idx+1] == ' ' and
info.hms(tokens[idx+2]) is not None):
# There is a space and then an "h", "m", or "s" label.
# e.g. the "12" in "12 h"
hms_idx = idx + 2
elif idx > 0 and info.hms(tokens[idx-1]) is not None:
# There is a "h", "m", or "s" preceding this token. Since neither
# of the previous cases was hit, there is no label following this
# token, so we use the previous label.
# e.g. the "04" in "12h04"
hms_idx = idx-1
elif (1 < idx == len_l-1 and tokens[idx-1] == ' ' and
info.hms(tokens[idx-2]) is not None):
# If we are looking at the final token, we allow for a
# backward-looking check to skip over a space.
# TODO: Are we sure this is the right condition here?
hms_idx = idx - 2
else:
hms_idx = None
return hms_idx
def _assign_hms(self, res, value_repr, hms):
# See GH issue #427, fixing float rounding
value = self._to_decimal(value_repr)
if hms == 0:
# Hour
res.hour = int(value)
if value % 1:
res.minute = int(60*(value % 1))
elif hms == 1:
(res.minute, res.second) = self._parse_min_sec(value)
elif hms == 2:
(res.second, res.microsecond) = self._parsems(value_repr)
def _could_be_tzname(self, hour, tzname, tzoffset, token):
return (hour is not None and
tzname is None and
tzoffset is None and
len(token) <= 5 and
(all(x in string.ascii_uppercase for x in token)
or token in self.info.UTCZONE))
def _ampm_valid(self, hour, ampm, fuzzy):
"""
For fuzzy parsing, 'a' or 'am' (both valid English words)
may erroneously trigger the AM/PM flag. Deal with that
here.
"""
val_is_ampm = True
# If there's already an AM/PM flag, this one isn't one.
if fuzzy and ampm is not None:
val_is_ampm = False
# If AM/PM is found and hour is not, raise a ValueError
if hour is None:
if fuzzy:
val_is_ampm = False
else:
raise ValueError('No hour specified with AM or PM flag.')
elif not 0 <= hour <= 12:
# If AM/PM is found, it's a 12 hour clock, so raise
# an error for invalid range
if fuzzy:
val_is_ampm = False
else:
raise ValueError('Invalid hour specified for 12-hour clock.')
return val_is_ampm
def _adjust_ampm(self, hour, ampm):
if hour < 12 and ampm == 1:
hour += 12
elif hour == 12 and ampm == 0:
hour = 0
return hour
def _parse_min_sec(self, value):
# TODO: Every usage of this function sets res.second to the return
# value. Are there any cases where second will be returned as None and
# we *don't* want to set res.second = None?
minute = int(value)
second = None
sec_remainder = value % 1
if sec_remainder:
second = int(60 * sec_remainder)
return (minute, second)
def _parse_hms(self, idx, tokens, info, hms_idx):
# TODO: Is this going to admit a lot of false-positives for when we
# just happen to have digits and "h", "m" or "s" characters in non-date
# text? I guess hex hashes won't have that problem, but there's plenty
# of random junk out there.
if hms_idx is None:
hms = None
new_idx = idx
elif hms_idx > idx:
hms = info.hms(tokens[hms_idx])
new_idx = hms_idx
else:
# Looking backwards, increment one.
hms = info.hms(tokens[hms_idx]) + 1
new_idx = idx
return (new_idx, hms)
# ------------------------------------------------------------------
# Handling for individual tokens. These are kept as methods instead
# of functions for the sake of customizability via subclassing.
def _parsems(self, value):
"""Parse a I[.F] seconds value into (seconds, microseconds)."""
if "." not in value:
return int(value), 0
else:
i, f = value.split(".")
return int(i), int(f.ljust(6, "0")[:6])
def _to_decimal(self, val):
try:
decimal_value = Decimal(val)
# See GH 662, edge case, infinite value should not be converted
# via `_to_decimal`
if not decimal_value.is_finite():
raise ValueError("Converted decimal value is infinite or NaN")
except Exception as e:
msg = "Could not convert %s to decimal" % val
six.raise_from(ValueError(msg), e)
else:
return decimal_value
# ------------------------------------------------------------------
# Post-Parsing construction of datetime output. These are kept as
# methods instead of functions for the sake of customizability via
# subclassing.
def _build_tzinfo(self, tzinfos, tzname, tzoffset):
if callable(tzinfos):
tzdata = tzinfos(tzname, tzoffset)
else:
tzdata = tzinfos.get(tzname)
# handle the case where tzinfos is passed an option that returns None,
# e.g. tzinfos = {'BRST': None}
if isinstance(tzdata, datetime.tzinfo) or tzdata is None:
tzinfo = tzdata
elif isinstance(tzdata, text_type):
tzinfo = tz.tzstr(tzdata)
elif isinstance(tzdata, integer_types):
tzinfo = tz.tzoffset(tzname, tzdata)
else:
raise TypeError("Offset must be tzinfo subclass, tz string, "
"or int offset.")
return tzinfo
def _build_tzaware(self, naive, res, tzinfos):
if (callable(tzinfos) or (tzinfos and res.tzname in tzinfos)):
tzinfo = self._build_tzinfo(tzinfos, res.tzname, res.tzoffset)
aware = naive.replace(tzinfo=tzinfo)
aware = self._assign_tzname(aware, res.tzname)
elif res.tzname and res.tzname in time.tzname:
aware = naive.replace(tzinfo=tz.tzlocal())
# Handle ambiguous local datetime
aware = self._assign_tzname(aware, res.tzname)
# This is mostly relevant for winter GMT zones parsed in the UK
if (aware.tzname() != res.tzname and
res.tzname in self.info.UTCZONE):
aware = aware.replace(tzinfo=tz.UTC)
elif res.tzoffset == 0:
aware = naive.replace(tzinfo=tz.UTC)
elif res.tzoffset:
aware = naive.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
elif not res.tzname and not res.tzoffset:
# i.e. no timezone information was found.
aware = naive
elif res.tzname:
# tz-like string was parsed but we don't know what to do
# with it
warnings.warn("tzname {tzname} identified but not understood. "
"Pass `tzinfos` argument in order to correctly "
"return a timezone-aware datetime. In a future "
"version, this will raise an "
"exception.".format(tzname=res.tzname),
category=UnknownTimezoneWarning)
aware = naive
return aware
def _build_naive(self, res, default):
repl = {}
for attr in ("year", "month", "day", "hour",
"minute", "second", "microsecond"):
value = getattr(res, attr)
if value is not None:
repl[attr] = value
if 'day' not in repl:
# If the default day exceeds the last day of the month, fall back
# to the end of the month.
cyear = default.year if res.year is None else res.year
cmonth = default.month if res.month is None else res.month
cday = default.day if res.day is None else res.day
if cday > monthrange(cyear, cmonth)[1]:
repl['day'] = monthrange(cyear, cmonth)[1]
naive = default.replace(**repl)
if res.weekday is not None and not res.day:
naive = naive + relativedelta.relativedelta(weekday=res.weekday)
return naive
def _assign_tzname(self, dt, tzname):
if dt.tzname() != tzname:
new_dt = tz.enfold(dt, fold=1)
if new_dt.tzname() == tzname:
return new_dt
return dt
def _recombine_skipped(self, tokens, skipped_idxs):
"""
>>> tokens = ["foo", " ", "bar", " ", "19June2000", "baz"]
>>> skipped_idxs = [0, 1, 2, 5]
>>> _recombine_skipped(tokens, skipped_idxs)
["foo bar", "baz"]
"""
skipped_tokens = []
for i, idx in enumerate(sorted(skipped_idxs)):
if i > 0 and idx - 1 == skipped_idxs[i - 1]:
skipped_tokens[-1] = skipped_tokens[-1] + tokens[idx]
else:
skipped_tokens.append(tokens[idx])
return skipped_tokens
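# --- Editorial example (not part of the original dateutil source). A minimal
# sketch of the fuzzy_with_tokens behaviour documented in _parse() above; the
# sample sentence mirrors the docstring example.
def _fuzzy_example():  # pragma: no cover - illustrative only
    p = parser()
    # Returns (datetime, skipped_tokens): the parts of the string that were
    # not interpreted as date information come back as a tuple of tokens.
    return p.parse("Today is January 1, 2047 at 8:21:00AM",
                   fuzzy_with_tokens=True)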
DEFAULTPARSER = parser()
def parse(timestr, parserinfo=None, **kwargs):
"""
Parse a string in one of the supported formats, using the
``parserinfo`` parameters.
:param timestr:
A string containing a date/time stamp.
:param parserinfo:
A :class:`parserinfo` object containing parameters for the parser.
If ``None``, the default arguments to the :class:`parserinfo`
constructor are used.
The ``**kwargs`` parameter takes the following keyword arguments:
:param default:
The default datetime object; if this is a datetime object and not
``None``, elements specified in ``timestr`` replace elements in the
default object.
:param ignoretz:
If set ``True``, time zones in parsed strings are ignored and a naive
:class:`datetime` object is returned.
:param tzinfos:
Additional time zone names / aliases which may be present in the
string. This argument maps time zone names (and optionally offsets
from those time zones) to time zones. This parameter can be a
dictionary with timezone aliases mapping time zone names to time
zones or a function taking two parameters (``tzname`` and
``tzoffset``) and returning a time zone.
The timezones to which the names are mapped can be an integer
offset from UTC in seconds or a :class:`tzinfo` object.
.. doctest::
:options: +NORMALIZE_WHITESPACE
>>> from dateutil.parser import parse
>>> from dateutil.tz import gettz
>>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")}
>>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200))
>>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
datetime.datetime(2012, 1, 19, 17, 21,
tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
This parameter is ignored if ``ignoretz`` is set.
:param dayfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the day (``True``) or month (``False``). If
``yearfirst`` is set to ``True``, this distinguishes between YDM and
YMD. If set to ``None``, this value is retrieved from the current
:class:`parserinfo` object (which itself defaults to ``False``).
:param yearfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the year. If ``True``, the first number is taken to
be the year, otherwise the last number is taken to be the year. If
this is set to ``None``, the value is retrieved from the current
:class:`parserinfo` object (which itself defaults to ``False``).
:param fuzzy:
Whether to allow fuzzy parsing, allowing for strings like "Today is
January 1, 2047 at 8:21:00AM".
:param fuzzy_with_tokens:
If ``True``, ``fuzzy`` is automatically set to True, and the parser
will return a tuple where the first element is the parsed
:class:`datetime.datetime` datetimestamp and the second element is
a tuple containing the portions of the string which were ignored:
.. doctest::
>>> from dateutil.parser import parse
>>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
(datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
:return:
Returns a :class:`datetime.datetime` object or, if the
``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
first element being a :class:`datetime.datetime` object, the second
a tuple containing the fuzzy tokens.
:raises ParserError:
Raised for invalid or unknown string formats, if the provided
:class:`tzinfo` is not in a valid format, or if an invalid date would
be created.
:raises OverflowError:
Raised if the parsed date exceeds the largest valid C integer on
your system.
"""
if parserinfo:
return parser(parserinfo).parse(timestr, **kwargs)
else:
return DEFAULTPARSER.parse(timestr, **kwargs)
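# --- Editorial example (not part of the original dateutil source). A short
# sketch of the tzinfos and dayfirst options documented in parse() above; the
# aliases and offsets follow the docstring example, the other inputs are
# illustrative assumptions.
def _parse_example():  # pragma: no cover - illustrative only
    tzinfos = {"BRST": -7200, "CST": tz.gettz("America/Chicago")}
    aware = parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
    # With dayfirst=True the ambiguous "01/05/09" is read as 1 May 2009
    # instead of January 5th, 2009.
    day_first = parse("01/05/09", dayfirst=True)
    return aware, day_first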
class _tzparser(object):
class _result(_resultbase):
__slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
"start", "end"]
class _attr(_resultbase):
__slots__ = ["month", "week", "weekday",
"yday", "jyday", "day", "time"]
def __repr__(self):
return self._repr("")
def __init__(self):
_resultbase.__init__(self)
self.start = self._attr()
self.end = self._attr()
def parse(self, tzstr):
res = self._result()
l = [x for x in re.split(r'([,:.]|[a-zA-Z]+|[0-9]+)', tzstr) if x]
used_idxs = list()
try:
len_l = len(l)
i = 0
while i < len_l:
# BRST+3[BRDT[+2]]
j = i
while j < len_l and not [x for x in l[j]
if x in "0123456789:,-+"]:
j += 1
if j != i:
if not res.stdabbr:
offattr = "stdoffset"
res.stdabbr = "".join(l[i:j])
else:
offattr = "dstoffset"
res.dstabbr = "".join(l[i:j])
for ii in range(j):
used_idxs.append(ii)
i = j
if (i < len_l and (l[i] in ('+', '-') or l[i][0] in
"0123456789")):
if l[i] in ('+', '-'):
# Yes, that's right. See the TZ variable
# documentation.
signal = (1, -1)[l[i] == '+']
used_idxs.append(i)
i += 1
else:
signal = -1
len_li = len(l[i])
if len_li == 4:
# -0300
setattr(res, offattr, (int(l[i][:2]) * 3600 +
int(l[i][2:]) * 60) * signal)
elif i + 1 < len_l and l[i + 1] == ':':
# -03:00
setattr(res, offattr,
(int(l[i]) * 3600 +
int(l[i + 2]) * 60) * signal)
used_idxs.append(i)
i += 2
elif len_li <= 2:
# -[0]3
setattr(res, offattr,
int(l[i][:2]) * 3600 * signal)
else:
return None
used_idxs.append(i)
i += 1
if res.dstabbr:
break
else:
break
if i < len_l:
for j in range(i, len_l):
if l[j] == ';':
l[j] = ','
assert l[i] == ','
i += 1
if i >= len_l:
pass
elif (8 <= l.count(',') <= 9 and
not [y for x in l[i:] if x != ','
for y in x if y not in "0123456789+-"]):
# GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
for x in (res.start, res.end):
x.month = int(l[i])
used_idxs.append(i)
i += 2
if l[i] == '-':
value = int(l[i + 1]) * -1
used_idxs.append(i)
i += 1
else:
value = int(l[i])
used_idxs.append(i)
i += 2
if value:
x.week = value
x.weekday = (int(l[i]) - 1) % 7
else:
x.day = int(l[i])
used_idxs.append(i)
i += 2
x.time = int(l[i])
used_idxs.append(i)
i += 2
if i < len_l:
if l[i] in ('-', '+'):
signal = (-1, 1)[l[i] == "+"]
used_idxs.append(i)
i += 1
else:
signal = 1
used_idxs.append(i)
res.dstoffset = (res.stdoffset + int(l[i]) * signal)
# This was a made-up format that is not in normal use
warn(('Parsed time zone "%s"' % tzstr) +
' is in a non-standard dateutil-specific format, which ' +
'is now deprecated; support for parsing this format ' +
'will be removed in future versions. It is recommended ' +
'that you switch to a standard format like the GNU ' +
'TZ variable format.', tz.DeprecatedTzFormatWarning)
elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
'.', '-', ':')
for y in x if y not in "0123456789"]):
for x in (res.start, res.end):
if l[i] == 'J':
# non-leap year day (1 based)
used_idxs.append(i)
i += 1
x.jyday = int(l[i])
elif l[i] == 'M':
# month[-.]week[-.]weekday
used_idxs.append(i)
i += 1
x.month = int(l[i])
used_idxs.append(i)
i += 1
assert l[i] in ('-', '.')
used_idxs.append(i)
i += 1
x.week = int(l[i])
if x.week == 5:
x.week = -1
used_idxs.append(i)
i += 1
assert l[i] in ('-', '.')
used_idxs.append(i)
i += 1
x.weekday = (int(l[i]) - 1) % 7
else:
# year day (zero based)
x.yday = int(l[i]) + 1
used_idxs.append(i)
i += 1
if i < len_l and l[i] == '/':
used_idxs.append(i)
i += 1
# start time
len_li = len(l[i])
if len_li == 4:
# -0300
x.time = (int(l[i][:2]) * 3600 +
int(l[i][2:]) * 60)
elif i + 1 < len_l and l[i + 1] == ':':
# -03:00
x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60
used_idxs.append(i)
i += 2
if i + 1 < len_l and l[i + 1] == ':':
used_idxs.append(i)
i += 2
x.time += int(l[i])
elif len_li <= 2:
# -[0]3
x.time = (int(l[i][:2]) * 3600)
else:
return None
used_idxs.append(i)
i += 1
assert i == len_l or l[i] == ','
i += 1
assert i >= len_l
except (IndexError, ValueError, AssertionError):
return None
unused_idxs = set(range(len_l)).difference(used_idxs)
res.any_unused_tokens = not {l[n] for n in unused_idxs}.issubset({",",":"})
return res
DEFAULTTZPARSER = _tzparser()
def _parsetz(tzstr):
return DEFAULTTZPARSER.parse(tzstr)
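# --- Editorial example (not part of the original dateutil source). A brief
# sketch of the private _tzparser/_parsetz helpers above applied to a
# GNU-style TZ string; the string itself is an illustrative assumption.
def _tzparser_example():  # pragma: no cover - illustrative only
    res = _parsetz("EST5EDT,M3.2.0/2,M11.1.0/2")
    if res is None:
        return None
    # The _result object exposes the parsed pieces as attributes, e.g. the
    # standard/DST abbreviations and the standard offset in seconds.
    return res.stdabbr, res.dstabbr, res.stdoffset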
class ParserError(ValueError):
"""Exception subclass used for any failure to parse a datetime string.
This is a subclass of :py:exc:`ValueError`, and should be raised any time
earlier versions of ``dateutil`` would have raised ``ValueError``.
.. versionadded:: 2.8.1
"""
def __str__(self):
try:
return self.args[0] % self.args[1:]
except (TypeError, IndexError):
return super(ParserError, self).__str__()
def __repr__(self):
args = ", ".join("'%s'" % arg for arg in self.args)
return "%s(%s)" % (self.__class__.__name__, args)
class UnknownTimezoneWarning(RuntimeWarning):
"""Raised when the parser finds a timezone it cannot parse into a tzinfo.
.. versionadded:: 2.7.0
"""
# vim:ts=4:sw=4:et
| gpl-3.0 |
kashif/scikit-learn | examples/feature_selection/plot_f_test_vs_mi.py | 75 | 1647 | """
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1]; the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is, the third feature is completely irrelevant.
The code below plots the dependency of y against individual x_i and normalized
values of univariate F-test statistics and mutual information.
As F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly mark x_3 as irrelevant.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression
np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)
mi = mutual_info_regression(X, y)
mi /= np.max(mi)
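# Editorial addition (not in the original example): printing the normalized
# scores makes the ranking discussed above visible without the figure.
print("F-test:", f_test)
print("Mutual information:", mi)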
plt.figure(figsize=(15, 5))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.scatter(X[:, i], y)
plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
if i == 0:
plt.ylabel("$y$", fontsize=14)
plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
fontsize=16)
plt.show()
| bsd-3-clause |
zaxtax/scikit-learn | sklearn/tree/tree.py | 23 | 40423 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None,
presort=False):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.presort = presort
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow bypassing several input checks.
Don't use this parameter unless you know what you're doing.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what you're doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity, unlike
# [:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
raise ValueError("min_samples_split must be in at least 2"
" or in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
raise ValueError("min_samples_leaf must be at least than 1 "
"or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow bypassing several input checks.
Don't use this parameter unless you know what you're doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predicted values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow bypassing several input checks.
Don't use this parameter unless you know what you're doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow bypassing several input checks.
Don't use this parameter unless you know what you're doing.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non-zero elements
indicate that the samples go through the nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None,
presort=False):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
presort=presort)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
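        Examples
        --------
        A minimal usage sketch only; ``X_train``, ``y_train`` and ``X_test``
        are placeholders for data of your own, not names defined here.
        >>> clf = DecisionTreeClassifier(random_state=0).fit(X_train, y_train)
        >>> clf.predict_proba(X_test) # doctest: +SKIP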
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
presort=False):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
presort=presort)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
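    Examples
    --------
    A minimal, illustrative sketch only; as noted above, this estimator is
    normally used as a building block of an ensemble (for example
    ``ExtraTreesClassifier``) rather than on its own.
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.tree import ExtraTreeClassifier
    >>> iris = load_iris()
    >>> clf = ExtraTreeClassifier(random_state=0).fit(iris.data, iris.target)
    >>> clf.predict(iris.data[:3]) # doctest: +SKIP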
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
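    Examples
    --------
    A minimal, illustrative sketch only; as noted above, this estimator is
    normally used inside an ensemble such as ``ExtraTreesRegressor``.
    >>> from sklearn.datasets import load_boston
    >>> from sklearn.tree import ExtraTreeRegressor
    >>> boston = load_boston()
    >>> reg = ExtraTreeRegressor(random_state=0).fit(boston.data, boston.target)
    >>> reg.predict(boston.data[:2]) # doctest: +SKIP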
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
| bsd-3-clause |
anjalisood/spark-tk | regression-tests/sparktkregtests/testcases/graph/closeness_centrality_test.py | 6 | 5663 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests closeness centrality algorithm for graphs"""
import unittest
from sparktkregtests.lib import sparktk_test
class ClosenessCentrality(sparktk_test.SparkTKTestCase):
def setUp(self):
edges = self.context.frame.create(
[(0, 1, 1),
(0, 2, 1),
(2, 3, 2),
(2, 4, 4),
(3, 4, 2),
(3, 5, 4),
(4, 5, 2),
(4, 6, 1)],
["src", "dst", "weights"])
vertices = self.context.frame.create(
[[0], [1], [2], [3], [4], [5], [6]], ["id"])
self.graph = self.context.graph.create(vertices, edges)
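        # Note: the expected centrality values in the tests below are
        # consistent with the edges being treated as directed (e.g. vertex 1
        # has no outgoing edges, so its closeness is 0.0).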
def test_default(self):
"""Test default settings"""
result_frame = self.graph.closeness_centrality(normalize=False)
result = result_frame.to_pandas()
#validate centrality values
expected_values = {0 : 0.5,
1: 0.0,
2: 0.667,
3: 0.75,
4: 1.0,
5: 0.0,
6: 0.0}
self._validate_result(result, expected_values)
def test_weights_single_shortest_path(self):
"""Tests weighted closeness when only one shortest path present"""
edges = self.context.frame.create(
            [(0, 1, 3), (0, 2, 2),
(0, 3, 6), (0, 4, 4),
(1, 3, 5), (1, 5, 5),
(2, 4, 1), (3, 4, 2),
(3, 5, 1), (4, 5, 4)],
["src", "dst", "weights"])
vertices = self.context.frame.create([[0], [1], [2], [3], [4], [5]], ["id"])
graph = self.context.graph.create(vertices, edges)
#validate centrality values
result_frame = graph.closeness_centrality("weights", False)
result = result_frame.to_pandas()
expected_values = {0 : 0.238,
1: 0.176,
2: 0.333,
3: 0.667,
4: 0.25,
5: 0.0}
self._validate_result(result, expected_values)
def test_weights_multiple_shortest_paths(self):
"""Test centrality when multiple shortest paths exist"""
result_frame = self.graph.closeness_centrality("weights", False)
#validate centrality values
expected_values = {0 : 0.261,
1: 0.0,
2: 0.235,
3: 0.333,
4: 0.667,
5: 0.0,
6: 0.0}
result = result_frame.to_pandas()
self._validate_result(result, expected_values)
def test_disconnected_edges(self):
"""Test closeness on graph with disconnected edges"""
edges = self.context.frame.create(
[['a', 'b'], ['a', 'c'],
['c', 'd'], ['c', 'e'],
['f', 'g'], ['g', 'h']],
['src', 'dst'])
vertices = self.context.frame.create(
[['a'], ['b'], ['c'], ['d'], ['e'], ['f'], ['g'], ['h']],
['id'])
graph = self.context.graph.create(vertices, edges)
result_frame = graph.closeness_centrality(normalize=False)
#validate centrality values
expected_values = {'a': 0.667,
'b': 0.0, 'c': 1.0, 'd': 0.0,
                           'e': 0.0, 'f': 0.667, 'g': 1.0, 'h': 0.0}
result = result_frame.to_pandas()
self._validate_result(result, expected_values)
def test_normalize(self):
"""Test normalized centrality"""
result_frame = self.graph.closeness_centrality(normalize=True)
result = result_frame.to_pandas()
#validate centrality values
expected_values = {0 : 0.5,
1: 0.0,
2: 0.444,
3: 0.375,
4: 0.333,
5: 0.0,
6: 0.0}
self._validate_result(result, expected_values)
def test_negative_edges(self):
"""Test closeness on graph with disconnected edges"""
edges = self.context.frame.create(
[['a', 'b', 10], ['a', 'c', 12],
['c', 'd', -1], ['c', 'e', 5]],
['src', 'dst', 'weight'])
vertices = self.context.frame.create(
[['a'], ['b'], ['c'], ['d'], ['e']],
['id'])
graph = self.context.graph.create(vertices, edges)
with self.assertRaisesRegexp(
Exception, "edge weight cannot be negative"):
graph.closeness_centrality(
edge_weight='weight',
normalize=False)
def test_bad_weights_column_name(self):
"""Should throw exception when bad weights column name given"""
with self.assertRaisesRegexp(
Exception, "Field \"BAD\" does not exist"):
self.graph.closeness_centrality("BAD")
def _validate_result(self, result, expected_values):
for i, row in result.iterrows():
id = row['id']
self.assertAlmostEqual(
row["closeness_centrality"],
expected_values[id],
                delta=0.1)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
unioslo/cerebrum | testsuite/docker/dev-config/ipython_config.py | 1 | 22672 | # Configuration file for ipython.
#------------------------------------------------------------------------------
# InteractiveShellApp(Configurable) configuration
#------------------------------------------------------------------------------
## A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
## Execute the given command string.
#c.InteractiveShellApp.code_to_run = ''
## Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
#c.InteractiveShellApp.exec_PYTHONSTARTUP = True
## List of files to run at IPython startup.
#c.InteractiveShellApp.exec_files = []
## lines of code to run at IPython startup.
c.InteractiveShellApp.exec_lines = ['%autoreload 2']
## A list of dotted module names of IPython extensions to load.
c.InteractiveShellApp.extensions = ['autoreload']
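## Note: the two uncommented settings above are the only active options in
# this file; together they load the autoreload extension and put it in mode 2,
# so that modules edited on disk are re-imported before executing new code.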
## dotted module name of an IPython extension to load.
#c.InteractiveShellApp.extra_extension = ''
## A file to be run
#c.InteractiveShellApp.file_to_run = ''
## Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk2', 'gtk3',
# 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2', 'qt4').
#c.InteractiveShellApp.gui = None
## Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
#c.InteractiveShellApp.hide_initial_ns = True
## Configure matplotlib for interactive use with the default matplotlib backend.
#c.InteractiveShellApp.matplotlib = None
## Run the module as a script.
#c.InteractiveShellApp.module_to_run = ''
## Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
#c.InteractiveShellApp.pylab = None
## If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
#c.InteractiveShellApp.pylab_import_all = True
## Reraise exceptions encountered loading IPython extensions?
#c.InteractiveShellApp.reraise_ipython_extension_failures = False
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# BaseIPythonApplication(Application) configuration
#------------------------------------------------------------------------------
## IPython: an enhanced interactive Python shell.
## Whether to create profile dir if it doesn't exist
#c.BaseIPythonApplication.auto_create = False
## Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
#c.BaseIPythonApplication.copy_config_files = False
## Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
#c.BaseIPythonApplication.extra_config_file = u''
## The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
#c.BaseIPythonApplication.ipython_dir = u''
## Whether to overwrite existing config files when copying
#c.BaseIPythonApplication.overwrite = False
## The IPython profile to use.
#c.BaseIPythonApplication.profile = u'default'
## Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
#c.BaseIPythonApplication.verbose_crash = False
#------------------------------------------------------------------------------
# TerminalIPythonApp(BaseIPythonApplication,InteractiveShellApp) configuration
#------------------------------------------------------------------------------
## Whether to display a banner upon starting IPython.
#c.TerminalIPythonApp.display_banner = True
## If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
#c.TerminalIPythonApp.force_interact = False
## Class to use to instantiate the TerminalInteractiveShell object. Useful for
# custom Frontends
#c.TerminalIPythonApp.interactive_shell_class = 'IPython.terminal.interactiveshell.TerminalInteractiveShell'
## Start IPython quickly by skipping the loading of config files.
#c.TerminalIPythonApp.quick = False
#------------------------------------------------------------------------------
# InteractiveShell(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## An enhanced, interactive shell for Python.
## 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
#c.InteractiveShell.ast_node_interactivity = 'last_expr'
## A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
#c.InteractiveShell.ast_transformers = []
## Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
#c.InteractiveShell.autocall = 0
## Autoindent IPython code entered interactively.
#c.InteractiveShell.autoindent = True
## Enable magic commands to be called without the leading %.
#c.InteractiveShell.automagic = True
## The part of the banner to be printed before the profile
#c.InteractiveShell.banner1 = 'Python 2.7.14 (default, Sep 18 2017, 20:56:43) \nType "copyright", "credits" or "license" for more information.\n\nIPython 5.5.0 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
## The part of the banner to be printed after the profile
#c.InteractiveShell.banner2 = ''
## Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
#c.InteractiveShell.cache_size = 1000
## Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
#c.InteractiveShell.color_info = True
## Set the color scheme (NoColor, Neutral, Linux, or LightBG).
#c.InteractiveShell.colors = 'Neutral'
##
#c.InteractiveShell.debug = False
## **Deprecated**
#
# Will be removed in IPython 6.0
#
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). `deep_reload`
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
#c.InteractiveShell.deep_reload = False
## Don't call post-execute functions that have failed in the past.
#c.InteractiveShell.disable_failing_post_execute = False
## If True, anything that would be passed to the pager will be displayed as
# regular output instead.
#c.InteractiveShell.display_page = False
## (Provisional API) enables html representation in mime bundles sent to pagers.
#c.InteractiveShell.enable_html_pager = False
## Total length of command history
#c.InteractiveShell.history_length = 10000
## The number of saved history entries to be loaded into the history buffer at
# startup.
#c.InteractiveShell.history_load_length = 1000
##
#c.InteractiveShell.ipython_dir = ''
## Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
#c.InteractiveShell.logappend = ''
## The name of the logfile to use.
#c.InteractiveShell.logfile = ''
## Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
#c.InteractiveShell.logstart = False
##
#c.InteractiveShell.object_info_string_level = 0
## Automatically call the pdb debugger after every exception.
#c.InteractiveShell.pdb = False
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_in1 = 'In [\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_in2 = ' .\\D.: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_out = 'Out[\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompts_pad_left = True
##
#c.InteractiveShell.quiet = False
##
#c.InteractiveShell.separate_in = '\n'
##
#c.InteractiveShell.separate_out = ''
##
#c.InteractiveShell.separate_out2 = ''
## Show rewritten input, e.g. for autocall.
#c.InteractiveShell.show_rewritten_input = True
## Enables rich html representation of docstrings. (This requires the docrepr
# module).
#c.InteractiveShell.sphinxify_docstring = False
##
#c.InteractiveShell.wildcards_case_sensitive = True
##
#c.InteractiveShell.xmode = 'Context'
#------------------------------------------------------------------------------
# TerminalInteractiveShell(InteractiveShell) configuration
#------------------------------------------------------------------------------
## Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
#c.TerminalInteractiveShell.confirm_exit = True
## Options for displaying tab completions, 'column', 'multicolumn', and
# 'readlinelike'. These options are for `prompt_toolkit`, see `prompt_toolkit`
# documentation for more information.
#c.TerminalInteractiveShell.display_completions = 'multicolumn'
## Shortcut style to use at the prompt. 'vi' or 'emacs'.
#c.TerminalInteractiveShell.editing_mode = 'emacs'
## Set the editor used by IPython (default to $EDITOR/vi/notepad).
#c.TerminalInteractiveShell.editor = 'vi'
## Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. This is
# in addition to the F2 binding, which is always enabled.
#c.TerminalInteractiveShell.extra_open_editor_shortcuts = False
## Highlight matching brackets.
#c.TerminalInteractiveShell.highlight_matching_brackets = True
## The name or class of a Pygments style to use for syntax
# highlighting:
# manni, igor, lovelace, xcode, vim, autumn, abap, vs, rrt, native, perldoc, borland, arduino, tango, emacs, friendly, monokai, paraiso-dark, colorful, murphy, bw, pastie, rainbow_dash, algol_nu, paraiso-light, trac, default, algol, fruity
#c.TerminalInteractiveShell.highlighting_style = traitlets.Undefined
## Override highlighting format for specific tokens
#c.TerminalInteractiveShell.highlighting_style_overrides = {}
## Enable mouse support in the prompt
#c.TerminalInteractiveShell.mouse_support = False
## Class used to generate Prompt token for prompt_toolkit
#c.TerminalInteractiveShell.prompts_class = 'IPython.terminal.prompts.Prompts'
## Use `raw_input` for the REPL, without completion and prompt colors.
#
# Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR.
# Known usage are: IPython own testing machinery, and emacs inferior-shell
# integration through elpy.
#
# This mode default to `True` if the `IPY_TEST_SIMPLE_PROMPT` environment
# variable is set, or the current terminal is not a tty.
#c.TerminalInteractiveShell.simple_prompt = False
## Number of line at the bottom of the screen to reserve for the completion menu
#c.TerminalInteractiveShell.space_for_menu = 6
## Automatically set the terminal title
#c.TerminalInteractiveShell.term_title = True
## Use 24bit colors instead of 256 colors in prompt highlighting. If your
# terminal supports true color, the following command should print 'TRUECOLOR'
# in orange: printf "\x1b[38;2;255;100;0mTRUECOLOR\x1b[0m\n"
#c.TerminalInteractiveShell.true_color = False
#------------------------------------------------------------------------------
# HistoryAccessor(HistoryAccessorBase) configuration
#------------------------------------------------------------------------------
## Access the history database without adding to it.
#
# This is intended for use by standalone history tools. IPython shells use
# HistoryManager, below, which is a subclass of this.
## Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
#c.HistoryAccessor.connection_options = {}
## enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
#c.HistoryAccessor.enabled = True
## Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
#
# you can also use the specific value `:memory:` (including the colon at both
# end but not the back ticks), to avoid creating an history file.
#c.HistoryAccessor.hist_file = u''
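## For example, to keep the history entirely in memory (using the `:memory:`
# value mentioned in the note above), one could set:
#c.HistoryAccessor.hist_file = u':memory:'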
#------------------------------------------------------------------------------
# HistoryManager(HistoryAccessor) configuration
#------------------------------------------------------------------------------
## A class to organize all history-related functionality in one place.
## Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
#c.HistoryManager.db_cache_size = 0
## Should the history database include output? (default: no)
#c.HistoryManager.db_log_output = False
#------------------------------------------------------------------------------
# ProfileDir(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
## Set the profile location directly. This overrides the logic used by the
# `profile` option.
#c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# BaseFormatter(Configurable) configuration
#------------------------------------------------------------------------------
## A base formatter class that is configurable.
#
# This formatter should usually be used as the base class of all formatters. It
# is a traited :class:`Configurable` class and includes an extensible API for
# users to determine how their objects are formatted. The following logic is
# used to find a function to format an given object.
#
# 1. The object is introspected to see if it has a method with the name
# :attr:`print_method`. If is does, that object is passed to that method
# for formatting.
# 2. If no print method is found, three internal dictionaries are consulted
# to find print method: :attr:`singleton_printers`, :attr:`type_printers`
# and :attr:`deferred_printers`.
#
# Users should use these dictionaries to register functions that will be used to
# compute the format data for their objects (if those objects don't have the
# special print methods). The easiest way of using these dictionaries is through
# the :meth:`for_type` and :meth:`for_type_by_name` methods.
#
# If no function/callable is found to compute the format data, ``None`` is
# returned and this format type is not used.
##
#c.BaseFormatter.deferred_printers = {}
##
#c.BaseFormatter.enabled = True
##
#c.BaseFormatter.singleton_printers = {}
##
#c.BaseFormatter.type_printers = {}
#------------------------------------------------------------------------------
# PlainTextFormatter(BaseFormatter) configuration
#------------------------------------------------------------------------------
## The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
##
#c.PlainTextFormatter.float_precision = ''
## Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
#c.PlainTextFormatter.max_seq_length = 1000
##
#c.PlainTextFormatter.max_width = 79
##
#c.PlainTextFormatter.newline = '\n'
##
#c.PlainTextFormatter.pprint = True
##
#c.PlainTextFormatter.verbose = False
#------------------------------------------------------------------------------
# Completer(Configurable) configuration
#------------------------------------------------------------------------------
## Enable unicode completions, e.g. \alpha<tab> . Includes completion of latex
# commands, unicode names, and expanding unicode characters back to latex
# commands.
#c.Completer.backslash_combining_completions = True
## Activate greedy completion (PENDING DEPRECATION). This is now mostly taken
# care of with Jedi.
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
#c.Completer.greedy = False
#------------------------------------------------------------------------------
# IPCompleter(Completer) configuration
#------------------------------------------------------------------------------
## Extension of the completer class with IPython-specific features
## DEPRECATED as of version 5.0.
#
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
#c.IPCompleter.limit_to__all__ = False
## Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
#c.IPCompleter.merge_completions = True
## Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
#c.IPCompleter.omit__names = 2
#------------------------------------------------------------------------------
# ScriptMagics(Magics) configuration
#------------------------------------------------------------------------------
## Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
## Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
#c.ScriptMagics.script_magics = []
## Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
#c.ScriptMagics.script_paths = {}
#------------------------------------------------------------------------------
# StoreMagics(Magics) configuration
#------------------------------------------------------------------------------
## Lightweight persistence for python variables.
#
# Provides the %store magic.
## If True, any %store-d variables will be automatically restored when IPython
# starts.
#c.StoreMagics.autorestore = False
| gpl-2.0 |
jaidevd/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 49 | 46769 | from itertools import product
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
from sklearn.exceptions import DataConversionWarning
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
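# Note on _weight_func: a zero distance produces an ``inf`` weight above (the
# divide-by-zero warning is silenced, not avoided); the zero-distance tests
# below therefore avoid weights=_weight_func and leave that case to the user.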
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                      np.concatenate(list(results[i + 1][0])))
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
# In this case it should rise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
                # Note: the branch below is never taken (``elif False``);
                # outlier labeling is exercised separately in
                # test_radius_neighbors_classifier_outlier_labeling.
                elif False:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0], [0.99, 0.99],
[0.98, 0.98], [2.01, 2.01]])
y = np.array([1, 2, 1, 1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.4, 1.4], [1.01, 1.01], [2.01, 2.01]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([-1, 1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
# we don't test for weights=_weight_func since user will be expected
# to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([[0.0]], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
    # Test radius neighbors in multi-output regression with various weights
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
    # Test k-neighbors regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
# Puts three points of each label in the plane and performs a
# nearest neighbor query on points near the decision boundary.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_greater(np.mean(rgs.predict(iris.data).round() == iris.target),
0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity',
include_self=True)
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity',
include_self=True)
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[[]])
        if issubclass(cls, (neighbors.KNeighborsClassifier,
                            neighbors.KNeighborsRegressor)):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = {}
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results[algorithm] = neigh.kneighbors(test, return_distance=True)
assert_array_almost_equal(results['brute'][0], results['ball_tree'][0])
assert_array_almost_equal(results['brute'][1], results['ball_tree'][1])
if 'kd_tree' in results:
assert_array_almost_equal(results['brute'][0],
results['kd_tree'][0])
assert_array_almost_equal(results['brute'][1],
results['kd_tree'][1])
def test_callable_metric():
def custom_metric(x1, x2):
return np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto',
metric=custom_metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute',
metric=custom_metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array = np.sort(dist_array)
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors et al. when query is not training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    # Test kneighbors et al. when query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_same_knn_parallel():
X, y = datasets.make_classification(n_samples=30, n_features=5,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
def check_same_knn_parallel(algorithm):
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
graph = clf.kneighbors_graph(X_test, mode='distance').toarray()
clf.set_params(n_jobs=3)
clf.fit(X_train, y_train)
y_parallel = clf.predict(X_test)
dist_parallel, ind_parallel = clf.kneighbors(X_test)
graph_parallel = \
clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y, y_parallel)
assert_array_almost_equal(dist, dist_parallel)
assert_array_equal(ind, ind_parallel)
assert_array_almost_equal(graph, graph_parallel)
for algorithm in ALGORITHMS:
yield check_same_knn_parallel, algorithm
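# Editor's note (illustrative, not part of the original tests): the ``yield``
# above relies on nose's generator-test protocol -- each yielded
# ``(callable, argument)`` pair is collected and run as a separate test case,
# roughly equivalent to
#
#     for algorithm in ALGORITHMS:
#         check_same_knn_parallel(algorithm)
#
# but with per-algorithm pass/fail reporting.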
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
# Non-regression test for #4523
# 'brute': uses scipy.spatial.distance through pairwise_distances
# 'ball_tree': uses sklearn.neighbors.dist_metrics
rng = np.random.RandomState(0)
X = rng.uniform(size=(6, 5))
NN = neighbors.NearestNeighbors
nn1 = NN(metric="jaccard", algorithm='brute').fit(X)
nn2 = NN(metric="jaccard", algorithm='ball_tree').fit(X)
assert_array_equal(nn1.kneighbors(X)[0], nn2.kneighbors(X)[0])
| bsd-3-clause |
cactusbin/nyt | matplotlib/lib/matplotlib/__init__.py | 1 | 41315 | """
This is an object-oriented plotting library.
A procedural interface is provided by the companion pyplot module,
which may be imported directly, e.g.::
from matplotlib.pyplot import *
To include numpy functions too, use::
from pylab import *
or using ipython::
ipython -pylab
For the most part, direct use of the object-oriented library is
encouraged when programming; pyplot is primarily for working
interactively. The
exceptions are the pyplot commands :func:`~matplotlib.pyplot.figure`,
:func:`~matplotlib.pyplot.subplot`,
:func:`~matplotlib.pyplot.subplots`,
:func:`~matplotlib.backends.backend_qt4agg.show`, and
:func:`~pyplot.savefig`, which can greatly simplify scripting.
Modules include:
:mod:`matplotlib.axes`
defines the :class:`~matplotlib.axes.Axes` class. Most pylab
commands are wrappers for :class:`~matplotlib.axes.Axes`
methods. The axes module is the highest level of OO access to
the library.
:mod:`matplotlib.figure`
defines the :class:`~matplotlib.figure.Figure` class.
:mod:`matplotlib.artist`
defines the :class:`~matplotlib.artist.Artist` base class for
all classes that draw things.
:mod:`matplotlib.lines`
defines the :class:`~matplotlib.lines.Line2D` class for
drawing lines and markers
:mod:`matplotlib.patches`
defines classes for drawing polygons
:mod:`matplotlib.text`
defines the :class:`~matplotlib.text.Text`,
:class:`~matplotlib.text.TextWithDash`, and
:class:`~matplotlib.text.Annotate` classes
:mod:`matplotlib.image`
defines the :class:`~matplotlib.image.AxesImage` and
:class:`~matplotlib.image.FigureImage` classes
:mod:`matplotlib.collections`
classes for efficient drawing of groups of lines or polygons
:mod:`matplotlib.colors`
classes for interpreting color specifications and for making
colormaps
:mod:`matplotlib.cm`
colormaps and the :class:`~matplotlib.image.ScalarMappable`
mixin class for providing color mapping functionality to other
classes
:mod:`matplotlib.ticker`
classes for calculating tick mark locations and for formatting
tick labels
:mod:`matplotlib.backends`
a subpackage with modules for various gui libraries and output
formats
The base matplotlib namespace includes:
:data:`~matplotlib.rcParams`
a global dictionary of default configuration settings. It is
    initialized by code which may be overridden by a matplotlibrc
file.
:func:`~matplotlib.rc`
a function for setting groups of rcParams values
:func:`~matplotlib.use`
a function for setting the matplotlib backend. If used, this
function must be called immediately after importing matplotlib
for the first time. In particular, it must be called
**before** importing pylab (if pylab is imported).
matplotlib was initially written by John D. Hunter (1968-2012) and is now
developed and maintained by a host of others.
Occasionally the internal documentation (python docstrings) will refer
to MATLAB®, a registered trademark of The MathWorks, Inc.
"""
from __future__ import print_function
import sys
__version__ = '1.3.0'
__version__numpy__ = '1.5' # minimum required numpy version
try:
import dateutil
except ImportError:
raise ImportError("matplotlib requires dateutil")
try:
import pyparsing
except ImportError:
raise ImportError("matplotlib requires pyparsing")
else:
_required = [1, 5, 6]
if [int(x) for x in pyparsing.__version__.split('.')] < _required:
raise ImportError(
"matplotlib requires pyparsing >= {0}".format(
'.'.join(str(x) for x in _required)))
import os, re, shutil, warnings
import distutils.sysconfig
import distutils.version
# cbook must import matplotlib only within function
# definitions, so it is safe to import from it here.
from matplotlib.cbook import is_string_like
from matplotlib.compat import subprocess
try:
reload
except NameError:
# Python 3
from imp import reload
# Needed for toolkit setuptools support
if 0:
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
pass # must not have setuptools
if not hasattr(sys, 'argv'): # for modpython
sys.argv = ['modpython']
import sys, os, tempfile
if sys.version_info[0] >= 3:
def ascii(s): return bytes(s, 'ascii')
def byte2str(b): return b.decode('ascii')
else:
ascii = str
def byte2str(b): return b
from matplotlib.rcsetup import (defaultParams,
validate_backend,
validate_toolbar)
major, minor1, minor2, s, tmp = sys.version_info
_python24 = (major == 2 and minor1 >= 4) or major >= 3
# the havedate check was a legacy from old matplotlib which preceded
# datetime support
_havedate = True
#try:
# import pkg_resources # pkg_resources is part of setuptools
#except ImportError: _have_pkg_resources = False
#else: _have_pkg_resources = True
if not _python24:
raise ImportError('matplotlib requires Python 2.4 or later')
import numpy
from distutils import version
expected_version = version.LooseVersion(__version__numpy__)
found_version = version.LooseVersion(numpy.__version__)
if not found_version >= expected_version:
raise ImportError(
'numpy %s or later is required; you have %s' % (
__version__numpy__, numpy.__version__))
del version
def _is_writable_dir(p):
"""
    p is a string pointing to a putative writable dir -- return True if p
    is such a string, else False
"""
try:
p + '' # test is string like
except TypeError:
return False
# Test whether the operating system thinks it's a writable directory.
# Note that this check is necessary on Google App Engine, because the
# subsequent check will succeed even though p may not be writable.
if not os.access(p, os.W_OK) or not os.path.isdir(p):
return False
# Also test that it is actually possible to write to a file here.
try:
t = tempfile.TemporaryFile(dir=p)
try:
t.write(ascii('1'))
finally:
t.close()
except OSError:
return False
return True
class Verbose:
"""
A class to handle reporting. Set the fileo attribute to any file
instance to handle the output. Default is sys.stdout
"""
levels = ('silent', 'helpful', 'debug', 'debug-annoying')
vald = dict( [(level, i) for i,level in enumerate(levels)])
# parse the verbosity from the command line; flags look like
# --verbose-silent or --verbose-helpful
_commandLineVerbose = None
for arg in sys.argv[1:]:
if not arg.startswith('--verbose-'):
continue
level_str = arg[10:]
# If it doesn't match one of ours, then don't even
# bother noting it, we are just a 3rd-party library
# to somebody else's script.
if level_str in levels:
_commandLineVerbose = level_str
def __init__(self):
self.set_level('silent')
self.fileo = sys.stdout
def set_level(self, level):
'set the verbosity to one of the Verbose.levels strings'
if self._commandLineVerbose is not None:
level = self._commandLineVerbose
if level not in self.levels:
warnings.warn('matplotlib: unrecognized --verbose-* string "%s".'
' Legal values are %s' % (level, self.levels))
else:
self.level = level
def set_fileo(self, fname):
std = {
'sys.stdout': sys.stdout,
'sys.stderr': sys.stderr,
}
if fname in std:
self.fileo = std[fname]
else:
try:
fileo = open(fname, 'w')
except IOError:
raise ValueError('Verbose object could not open log file "%s" for writing.\nCheck your matplotlibrc verbose.fileo setting'%fname)
else:
self.fileo = fileo
def report(self, s, level='helpful'):
"""
print message s to self.fileo if self.level>=level. Return
value indicates whether a message was issued
"""
if self.ge(level):
print(s, file=self.fileo)
return True
return False
def wrap(self, fmt, func, level='helpful', always=True):
"""
        return a callable function that wraps func and reports its
        output through the verbose handler if the current verbosity level
        is at least *level*
if always is True, the report will occur on every function
call; otherwise only on the first time the function is called
"""
assert callable(func)
def wrapper(*args, **kwargs):
ret = func(*args, **kwargs)
if (always or not wrapper._spoke):
spoke = self.report(fmt%ret, level)
if not wrapper._spoke: wrapper._spoke = spoke
return ret
wrapper._spoke = False
wrapper.__doc__ = func.__doc__
return wrapper
def ge(self, level):
'return true if self.level is >= level'
return self.vald[self.level]>=self.vald[level]
verbose=Verbose()
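# Editor's note -- hypothetical usage sketch, not part of matplotlib: the
# ``Verbose.wrap`` helper above is what produces the "$HOME=%s"-style reports
# used further down in this module, e.g.
#
#     def _get_thing():
#         return '/tmp/thing'                       # hypothetical helper
#     get_thing = verbose.wrap('THING=%s', _get_thing, always=False)
#     get_thing()   # reports "THING=/tmp/thing" once at the 'helpful' level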
def checkdep_dvipng():
try:
s = subprocess.Popen(['dvipng','-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
line = s.stdout.readlines()[1]
v = byte2str(line.split()[-1])
return v
except (IndexError, ValueError, OSError):
return None
def checkdep_ghostscript():
try:
if sys.platform == 'win32':
command_args = ['gswin32c', '--version']
else:
command_args = ['gs', '--version']
s = subprocess.Popen(command_args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
v = byte2str(s.stdout.read()[:-1])
return v
except (IndexError, ValueError, OSError):
return None
def checkdep_tex():
try:
s = subprocess.Popen(['tex','-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
line = byte2str(s.stdout.readlines()[0])
pattern = '3\.1\d+'
match = re.search(pattern, line)
v = match.group(0)
return v
except (IndexError, ValueError, AttributeError, OSError):
return None
def checkdep_pdftops():
try:
s = subprocess.Popen(['pdftops','-v'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for line in s.stderr:
if b'version' in line:
v = byte2str(line.split()[-1])
return v
except (IndexError, ValueError, UnboundLocalError, OSError):
return None
def checkdep_inkscape():
try:
s = subprocess.Popen(['inkscape','-V'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for line in s.stdout:
if b'Inkscape' in line:
v = byte2str(line.split()[1])
break
return v
except (IndexError, ValueError, UnboundLocalError, OSError):
return None
def checkdep_xmllint():
try:
s = subprocess.Popen(['xmllint','--version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for line in s.stderr:
if b'version' in line:
v = byte2str(line.split()[-1])
break
return v
except (IndexError, ValueError, UnboundLocalError, OSError):
return None
def compare_versions(a, b):
"return True if a is greater than or equal to b"
if a:
a = distutils.version.LooseVersion(a)
b = distutils.version.LooseVersion(b)
if a>=b: return True
else: return False
else: return False
def checkdep_ps_distiller(s):
if not s:
return False
flag = True
gs_req = '7.07'
gs_sugg = '7.07'
gs_v = checkdep_ghostscript()
if compare_versions(gs_v, gs_sugg): pass
elif compare_versions(gs_v, gs_req):
verbose.report(('ghostscript-%s found. ghostscript-%s or later '
'is recommended to use the ps.usedistiller option.') % (gs_v, gs_sugg))
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller option can not be used '
'unless ghostscript-%s or later is installed on your system') % gs_req)
if s == 'xpdf':
pdftops_req = '3.0'
pdftops_req_alt = '0.9' # poppler version numbers, ugh
pdftops_v = checkdep_pdftops()
if compare_versions(pdftops_v, pdftops_req):
pass
elif compare_versions(pdftops_v, pdftops_req_alt) and not \
compare_versions(pdftops_v, '1.0'):
pass
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller can not be set to '
'xpdf unless xpdf-%s or later is installed on your system') % pdftops_req)
if flag:
return s
else:
return False
def checkdep_usetex(s):
if not s:
return False
tex_req = '3.1415'
gs_req = '7.07'
gs_sugg = '7.07'
dvipng_req = '1.5'
flag = True
tex_v = checkdep_tex()
if compare_versions(tex_v, tex_req): pass
else:
flag = False
warnings.warn(('matplotlibrc text.usetex option can not be used '
'unless TeX-%s or later is '
'installed on your system') % tex_req)
dvipng_v = checkdep_dvipng()
if compare_versions(dvipng_v, dvipng_req): pass
else:
flag = False
warnings.warn( 'matplotlibrc text.usetex can not be used with *Agg '
'backend unless dvipng-1.5 or later is '
'installed on your system')
gs_v = checkdep_ghostscript()
if compare_versions(gs_v, gs_sugg): pass
elif compare_versions(gs_v, gs_req):
verbose.report(('ghostscript-%s found. ghostscript-%s or later is '
'recommended for use with the text.usetex '
'option.') % (gs_v, gs_sugg))
else:
flag = False
warnings.warn(('matplotlibrc text.usetex can not be used '
'unless ghostscript-%s or later is '
'installed on your system') % gs_req)
return flag
def _get_home():
"""Find user's home directory if possible.
Otherwise, returns None.
:see: http://mail.python.org/pipermail/python-list/2005-February/325395.html
"""
try:
path = os.path.expanduser("~")
except ImportError:
# This happens on Google App Engine (pwd module is not present).
pass
else:
if os.path.isdir(path):
return path
for evar in ('HOME', 'USERPROFILE', 'TMP'):
path = os.environ.get(evar)
if path is not None and os.path.isdir(path):
return path
return None
def _create_tmp_config_dir():
"""
If the config directory can not be created, create a temporary
directory.
Returns None if a writable temporary directory could not be created.
"""
import getpass
import tempfile
try:
tempdir = tempfile.gettempdir()
except NotImplementedError:
# Some restricted platforms (such as Google App Engine) do not provide
# gettempdir.
return None
tempdir = os.path.join(tempdir, 'matplotlib-%s' % getpass.getuser())
os.environ['MPLCONFIGDIR'] = tempdir
return tempdir
get_home = verbose.wrap('$HOME=%s', _get_home, always=False)
def _get_xdg_config_dir():
"""
Returns the XDG configuration directory, according to the `XDG
base directory spec
<http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.
"""
return os.environ.get('XDG_CONFIG_HOME', os.path.join(get_home(), '.config'))
def _get_xdg_cache_dir():
"""
Returns the XDG cache directory, according to the `XDG
base directory spec
<http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.
"""
return os.environ.get('XDG_CACHE_HOME', os.path.join(get_home(), '.cache'))
def _get_config_or_cache_dir(xdg_base):
from matplotlib.cbook import mkdirs
configdir = os.environ.get('MPLCONFIGDIR')
if configdir is not None:
if not os.path.exists(configdir):
from matplotlib.cbook import mkdirs
mkdirs(configdir)
if not _is_writable_dir(configdir):
return _create_tmp_config_dir()
return configdir
h = get_home()
p = os.path.join(h, '.matplotlib')
if (sys.platform.startswith('linux') and
not os.path.exists(p)):
p = os.path.join(xdg_base, 'matplotlib')
if os.path.exists(p):
if not _is_writable_dir(p):
return _create_tmp_config_dir()
else:
try:
mkdirs(p)
except OSError:
return _create_tmp_config_dir()
return p
def _get_configdir():
"""
Return the string representing the configuration directory.
The directory is chosen as follows:
1. If the MPLCONFIGDIR environment variable is supplied, choose that.
2a. On Linux, if `$HOME/.matplotlib` exists, choose that, but warn that
that is the old location. Barring that, follow the XDG specification
and look first in `$XDG_CONFIG_HOME`, if defined, or `$HOME/.config`.
2b. On other platforms, choose `$HOME/.matplotlib`.
3. If the chosen directory exists and is writable, use that as the
configuration directory.
4. If possible, create a temporary directory, and use it as the
configuration directory.
5. A writable directory could not be found or created; return None.
"""
return _get_config_or_cache_dir(_get_xdg_config_dir())
get_configdir = verbose.wrap('CONFIGDIR=%s', _get_configdir, always=False)
def _get_cachedir():
"""
Return the location of the cache directory.
The procedure used to find the directory is the same as for
    _get_config_dir, except using `$XDG_CACHE_HOME`/`~/.cache` instead.
"""
return _get_config_or_cache_dir(_get_xdg_cache_dir())
get_cachedir = verbose.wrap('CACHEDIR=%s', _get_cachedir, always=False)
def _get_data_path():
'get the path to matplotlib data'
if 'MATPLOTLIBDATA' in os.environ:
path = os.environ['MATPLOTLIBDATA']
if not os.path.isdir(path):
raise RuntimeError('Path in environment MATPLOTLIBDATA not a directory')
return path
path = os.sep.join([os.path.dirname(__file__), 'mpl-data'])
if os.path.isdir(path):
return path
    # setuptools' namespace_packages may hijack this init file
# so need to try something known to be in matplotlib, not basemap
import matplotlib.afm
path = os.sep.join([os.path.dirname(matplotlib.afm.__file__), 'mpl-data'])
if os.path.isdir(path):
return path
# py2exe zips pure python, so still need special check
if getattr(sys,'frozen',None):
exe_path = os.path.dirname(sys.executable)
path = os.path.join(exe_path, 'mpl-data')
if os.path.isdir(path):
return path
# Try again assuming we need to step up one more directory
path = os.path.join(os.path.split(exe_path)[0], 'mpl-data')
if os.path.isdir(path):
return path
        # Try again assuming sys.path[0] is a dir, not an exe
path = os.path.join(sys.path[0], 'mpl-data')
if os.path.isdir(path):
return path
raise RuntimeError('Could not find the matplotlib data files')
def _get_data_path_cached():
if defaultParams['datapath'][0] is None:
defaultParams['datapath'][0] = _get_data_path()
return defaultParams['datapath'][0]
get_data_path = verbose.wrap('matplotlib data path %s', _get_data_path_cached,
always=False)
def get_example_data(fname):
"""
get_example_data is deprecated -- use matplotlib.cbook.get_sample_data instead
"""
raise NotImplementedError('get_example_data is deprecated -- use matplotlib.cbook.get_sample_data instead')
def get_py2exe_datafiles():
datapath = get_data_path()
_, tail = os.path.split(datapath)
d = {}
for root, _, files in os.walk(datapath):
# Need to explicitly remove cocoa_agg files or py2exe complains
        # NOTE I don't know why, but do as the previous version did
if 'Matplotlib.nib' in files:
files.remove('Matplotlib.nib')
files = [os.path.join(root, filename) for filename in files]
root = root.replace(tail, 'mpl-data')
root = root[root.index('mpl-data'):]
d[root] = files
return list(d.items())
def matplotlib_fname():
"""
Get the location of the config file.
The file location is determined in the following order
- `$PWD/matplotlibrc`
- environment variable `MATPLOTLIBRC`
    - `$MPLCONFIGDIR/matplotlibrc`
- On Linux,
- `$HOME/.matplotlib/matplotlibrc`, if it exists
- or `$XDG_CONFIG_HOME/matplotlib/matplotlibrc` (if
$XDG_CONFIG_HOME is defined)
- or `$HOME/.config/matplotlib/matplotlibrc` (if
$XDG_CONFIG_HOME is not defined)
- On other platforms,
- `$HOME/.matplotlib/matplotlibrc` if `$HOME` is defined.
- Lastly, it looks in `$MATPLOTLIBDATA/matplotlibrc` for a
system-defined copy.
"""
fname = os.path.join(os.getcwd(), 'matplotlibrc')
if os.path.exists(fname):
return fname
if 'MATPLOTLIBRC' in os.environ:
path = os.environ['MATPLOTLIBRC']
if os.path.exists(path):
fname = os.path.join(path, 'matplotlibrc')
if os.path.exists(fname):
return fname
configdir = _get_configdir()
if configdir is not None:
fname = os.path.join(configdir, 'matplotlibrc')
if os.path.exists(fname):
if (sys.platform.startswith('linux') and
fname == os.path.join(
get_home(), '.matplotlib', 'matplotlibrc')):
warnings.warn(
"Found matplotlib configuration in ~/.matplotlib/. "
"To conform with the XDG base directory standard, "
"this configuration location has been deprecated "
"on Linux, and the new location is now %r/matplotlib/. "
"Please move your configuration there to ensure that "
"matplotlib will continue to find it in the future." %
_get_xdg_config_dir())
return fname
path = get_data_path() # guaranteed to exist or raise
fname = os.path.join(path, 'matplotlibrc')
if not os.path.exists(fname):
warnings.warn('Could not find matplotlibrc; using defaults')
return fname
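# Editor's note -- hypothetical usage sketch, not part of matplotlib: given
# the search order above, a per-project rc file can be selected from the
# shell by pointing MATPLOTLIBRC at a directory containing a ``matplotlibrc``
# file, e.g.
#
#     MATPLOTLIBRC=/path/to/project python my_script.py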
_deprecated_map = {
'text.fontstyle': 'font.style',
'text.fontangle': 'font.style',
'text.fontvariant': 'font.variant',
'text.fontweight': 'font.weight',
'text.fontsize': 'font.size',
'tick.size' : 'tick.major.size',
'svg.embed_char_paths' : 'svg.fonttype',
'savefig.extension' : 'savefig.format'
}
_deprecated_ignore_map = {
}
class RcParams(dict):
"""
A dictionary object including validation
validating functions are defined and associated with rc parameters in
:mod:`matplotlib.rcsetup`
"""
validate = dict((key, converter) for key, (default, converter) in
defaultParams.iteritems())
msg_depr = "%s is deprecated and replaced with %s; please use the latter."
msg_depr_ignore = "%s is deprecated and ignored. Use %s"
def __setitem__(self, key, val):
try:
if key in _deprecated_map:
alt = _deprecated_map[key]
warnings.warn(self.msg_depr % (key, alt))
key = alt
elif key in _deprecated_ignore_map:
alt = _deprecated_ignore_map[key]
warnings.warn(self.msg_depr_ignore % (key, alt))
return
cval = self.validate[key](val)
dict.__setitem__(self, key, cval)
except KeyError:
raise KeyError('%s is not a valid rc parameter.\
See rcParams.keys() for a list of valid parameters.' % (key,))
def __getitem__(self, key):
if key in _deprecated_map:
alt = _deprecated_map[key]
warnings.warn(self.msg_depr % (key, alt))
key = alt
elif key in _deprecated_ignore_map:
alt = _deprecated_ignore_map[key]
warnings.warn(self.msg_depr_ignore % (key, alt))
key = alt
return dict.__getitem__(self, key)
def __repr__(self):
import pprint
class_name = self.__class__.__name__
indent = len(class_name) + 1
repr_split = pprint.pformat(dict(self), indent=1,
width=80 - indent).split('\n')
repr_indented = ('\n' + ' ' * indent).join(repr_split)
return '{0}({1})'.format(class_name, repr_indented)
def __str__(self):
return '\n'.join('{0}: {1}'.format(k, v)
for k, v in sorted(self.items()))
def keys(self):
"""
Return sorted list of keys.
"""
k = list(dict.keys(self))
k.sort()
return k
def values(self):
"""
Return values in order of sorted keys.
"""
return [self[k] for k in self.keys()]
def find_all(self, pattern):
"""
Return the subset of this RcParams dictionary whose keys match,
using :func:`re.search`, the given ``pattern``.
.. note::
Changes to the returned dictionary are *not* propagated to
the parent RcParams dictionary.
"""
import re
pattern_re = re.compile(pattern)
return RcParams((key, value)
for key, value in self.items()
if pattern_re.search(key))
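# Editor's note -- hypothetical usage sketch, not part of matplotlib:
# ``RcParams.find_all`` returns a filtered copy, so for example
#
#     rcParams.find_all(r'lines\.')   # RcParams with 'lines.linewidth', ...
#
# can be inspected or modified without touching the global ``rcParams``.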
def rc_params(fail_on_error=False):
"""Return a :class:`matplotlib.RcParams` instance from the
default matplotlib rc file.
"""
fname = matplotlib_fname()
if not os.path.exists(fname):
# this should never happen, default in mpl-data should always be found
message = 'could not find rc file; returning defaults'
ret = RcParams([(key, default) for key, (default, _) in \
defaultParams.iteritems() ])
warnings.warn(message)
return ret
return rc_params_from_file(fname, fail_on_error)
def rc_params_from_file(fname, fail_on_error=False):
"""Return a :class:`matplotlib.RcParams` instance from the
contents of the given filename.
"""
cnt = 0
rc_temp = {}
with open(fname) as fd:
for line in fd:
cnt += 1
strippedline = line.split('#', 1)[0].strip()
if not strippedline: continue
tup = strippedline.split(':', 1)
if len(tup) != 2:
warnings.warn('Illegal line #%d\n\t%s\n\tin file "%s"' % \
(cnt, line, fname))
continue
key, val = tup
key = key.strip()
val = val.strip()
if key in rc_temp:
warnings.warn('Duplicate key in file "%s", line #%d' % \
(fname, cnt))
rc_temp[key] = (val, line, cnt)
ret = RcParams([(key, default) for key, (default, _) in \
defaultParams.iteritems()])
for key in ('verbose.level', 'verbose.fileo'):
if key in rc_temp:
val, line, cnt = rc_temp.pop(key)
if fail_on_error:
ret[key] = val # try to convert to proper type or raise
else:
try: ret[key] = val # try to convert to proper type or skip
except Exception as msg:
warnings.warn('Bad val "%s" on line #%d\n\t"%s"\n\tin file \
"%s"\n\t%s' % (val, cnt, line, fname, msg))
verbose.set_level(ret['verbose.level'])
verbose.set_fileo(ret['verbose.fileo'])
for key, (val, line, cnt) in rc_temp.iteritems():
if key in defaultParams:
if fail_on_error:
ret[key] = val # try to convert to proper type or raise
else:
try: ret[key] = val # try to convert to proper type or skip
except Exception as msg:
warnings.warn('Bad val "%s" on line #%d\n\t"%s"\n\tin file \
"%s"\n\t%s' % (val, cnt, line, fname, msg))
elif key in _deprecated_ignore_map:
warnings.warn('%s is deprecated. Update your matplotlibrc to use %s instead.'% (key, _deprecated_ignore_map[key]))
else:
print("""
Bad key "%s" on line %d in
%s.
You probably need to get an updated matplotlibrc file from
http://matplotlib.sf.net/_static/matplotlibrc or from the matplotlib source
distribution""" % (key, cnt, fname), file=sys.stderr)
if ret['datapath'] is None:
ret['datapath'] = get_data_path()
if not ret['text.latex.preamble'] == ['']:
verbose.report("""
*****************************************************************
You have the following UNSUPPORTED LaTeX preamble customizations:
%s
Please do not ask for support with these customizations active.
*****************************************************************
"""% '\n'.join(ret['text.latex.preamble']), 'helpful')
verbose.report('loaded rc file %s'%fname)
return ret
# this is the instance used by the matplotlib classes
rcParams = rc_params()
if rcParams['examples.directory']:
# paths that are intended to be relative to matplotlib_fname()
# are allowed for the examples.directory parameter.
# However, we will need to fully qualify the path because
# Sphinx requires absolute paths.
if not os.path.isabs(rcParams['examples.directory']):
_basedir, _fname = os.path.split(matplotlib_fname())
# Sometimes matplotlib_fname() can return relative paths,
        # Also, using realpath() guarantees that Sphinx will use
# the same path that matplotlib sees (in case of weird symlinks).
_basedir = os.path.realpath(_basedir)
_fullpath = os.path.join(_basedir, rcParams['examples.directory'])
rcParams['examples.directory'] = _fullpath
rcParamsOrig = rcParams.copy()
rcParamsDefault = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
rcParams['ps.usedistiller'] = checkdep_ps_distiller(rcParams['ps.usedistiller'])
rcParams['text.usetex'] = checkdep_usetex(rcParams['text.usetex'])
if rcParams['axes.formatter.use_locale']:
import locale
locale.setlocale(locale.LC_ALL, '')
def rc(group, **kwargs):
"""
Set the current rc params. Group is the grouping for the rc, e.g.,
for ``lines.linewidth`` the group is ``lines``, for
``axes.facecolor``, the group is ``axes``, and so on. Group may
also be a list or tuple of group names, e.g., (*xtick*, *ytick*).
    *kwargs* is a dictionary of attribute name/value pairs, e.g.::
rc('lines', linewidth=2, color='r')
sets the current rc params and is equivalent to::
rcParams['lines.linewidth'] = 2
rcParams['lines.color'] = 'r'
The following aliases are available to save typing for interactive
users:
===== =================
Alias Property
===== =================
'lw' 'linewidth'
'ls' 'linestyle'
'c' 'color'
'fc' 'facecolor'
'ec' 'edgecolor'
'mew' 'markeredgewidth'
'aa' 'antialiased'
===== =================
Thus you could abbreviate the above rc command as::
rc('lines', lw=2, c='r')
Note you can use python's kwargs dictionary facility to store
dictionaries of default parameters. e.g., you can customize the
font rc as follows::
font = {'family' : 'monospace',
'weight' : 'bold',
'size' : 'larger'}
rc('font', **font) # pass in the font dict as kwargs
This enables you to easily switch between several configurations.
Use :func:`~matplotlib.pyplot.rcdefaults` to restore the default
rc params after changes.
"""
aliases = {
'lw' : 'linewidth',
'ls' : 'linestyle',
'c' : 'color',
'fc' : 'facecolor',
'ec' : 'edgecolor',
'mew' : 'markeredgewidth',
'aa' : 'antialiased',
}
if is_string_like(group):
group = (group,)
for g in group:
for k,v in kwargs.iteritems():
name = aliases.get(k) or k
key = '%s.%s' % (g, name)
try:
rcParams[key] = v
except KeyError:
raise KeyError('Unrecognized key "%s" for group "%s" and name "%s"' %
(key, g, name))
def rcdefaults():
"""
Restore the default rc params. These are not the params loaded by
the rc file, but mpl's internal params. See rc_file_defaults for
reloading the default params from the rc file
"""
rcParams.update(rcParamsDefault)
def rc_file(fname):
"""
Update rc params from file.
"""
rcParams.update(rc_params_from_file(fname))
class rc_context(object):
"""
Return a context manager for managing rc settings.
This allows one to do::
with mpl.rc_context(fname='screen.rc'):
plt.plot(x, a)
with mpl.rc_context(fname='print.rc'):
plt.plot(x, b)
plt.plot(x, c)
The 'a' vs 'x' and 'c' vs 'x' plots would have settings from
'screen.rc', while the 'b' vs 'x' plot would have settings from
'print.rc'.
A dictionary can also be passed to the context manager::
with mpl.rc_context(rc={'text.usetex': True}, fname='screen.rc'):
plt.plot(x, a)
The 'rc' dictionary takes precedence over the settings loaded from
'fname'. Passing a dictionary only is also valid.
"""
def __init__(self, rc=None, fname=None):
self.rcdict = rc
self.fname = fname
self._rcparams = rcParams.copy()
if self.fname:
rc_file(self.fname)
if self.rcdict:
rcParams.update(self.rcdict)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
rcParams.update(self._rcparams)
def rc_file_defaults():
"""
Restore the default rc params from the original matplotlib rc that
was loaded
"""
rcParams.update(rcParamsOrig)
_use_error_msg = """ This call to matplotlib.use() has no effect
because the backend has already been chosen;
matplotlib.use() must be called *before* pylab, matplotlib.pyplot,
or matplotlib.backends is imported for the first time.
"""
def use(arg, warn=True, force=False):
"""
Set the matplotlib backend to one of the known backends.
The argument is case-insensitive. *warn* specifies whether a
warning should be issued if a backend has already been set up.
*force* is an **experimental** flag that tells matplotlib to
attempt to initialize a new backend by reloading the backend
module.
.. note::
This function must be called *before* importing pyplot for
the first time; or, if you are not using pyplot, it must be called
before importing matplotlib.backends. If warn is True, a warning
is issued if you try and call this after pylab or pyplot have been
loaded. In certain black magic use cases, e.g.
:func:`pyplot.switch_backend`, we are doing the reloading necessary to
make the backend switch work (in some cases, e.g., pure image
backends) so one can set warn=False to suppress the warnings.
To find out which backend is currently set, see
:func:`matplotlib.get_backend`.
"""
# Lets determine the proper backend name first
if arg.startswith('module://'):
name = arg
else:
# Lowercase only non-module backend names (modules are case-sensitive)
arg = arg.lower()
name = validate_backend(arg)
# Check if we've already set up a backend
if 'matplotlib.backends' in sys.modules:
# Warn only if called with a different name
if (rcParams['backend'] != name) and warn:
warnings.warn(_use_error_msg)
# Unless we've been told to force it, just return
if not force:
return
need_reload = True
else:
need_reload = False
# Store the backend name
rcParams['backend'] = name
# If needed we reload here because a lot of setup code is triggered on
# module import. See backends/__init__.py for more detail.
if need_reload:
reload(sys.modules['matplotlib.backends'])
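# Editor's note -- hypothetical usage sketch, not part of matplotlib: as the
# docstring above explains, backend selection must precede the pyplot import,
# e.g. in a headless script:
#
#     import matplotlib
#     matplotlib.use('Agg')             # must come before pyplot is imported
#     import matplotlib.pyplot as plt
#     plt.plot([0, 1, 2], [0, 1, 4])
#     plt.savefig('figure.png')         # Agg renders to files, not a window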
def get_backend():
"""Return the name of the current backend."""
return rcParams['backend']
def interactive(b):
"""
Set interactive mode to boolean b.
    If b is True, then draw after every plotting command, e.g., after xlabel
"""
rcParams['interactive'] = b
def is_interactive():
'Return true if plot mode is interactive'
b = rcParams['interactive']
return b
def tk_window_focus():
"""Return true if focus maintenance under TkAgg on win32 is on.
This currently works only for python.exe and IPython.exe.
Both IDLE and Pythonwin.exe fail badly when tk_window_focus is on."""
if rcParams['backend'] != 'TkAgg':
return False
return rcParams['tk.window_focus']
# Now allow command line to override
# Allow command line access to the backend with -d (MATLAB compatible
# flag)
for s in sys.argv[1:]:
if s.startswith('-d') and len(s) > 2: # look for a -d flag
try:
use(s[2:])
except (KeyError, ValueError):
pass
# we don't want to assume all -d flags are backends, eg -debug
default_test_modules = [
'matplotlib.tests.test_agg',
'matplotlib.tests.test_arrow_patches',
'matplotlib.tests.test_artist',
'matplotlib.tests.test_axes',
'matplotlib.tests.test_backend_pdf',
'matplotlib.tests.test_backend_pgf',
'matplotlib.tests.test_backend_qt4',
'matplotlib.tests.test_backend_svg',
'matplotlib.tests.test_basic',
'matplotlib.tests.test_bbox_tight',
'matplotlib.tests.test_cbook',
'matplotlib.tests.test_coding_standards',
'matplotlib.tests.test_collections',
'matplotlib.tests.test_colorbar',
'matplotlib.tests.test_colors',
'matplotlib.tests.test_compare_images',
'matplotlib.tests.test_contour',
'matplotlib.tests.test_dates',
'matplotlib.tests.test_delaunay',
'matplotlib.tests.test_figure',
'matplotlib.tests.test_image',
'matplotlib.tests.test_legend',
'matplotlib.tests.test_lines',
'matplotlib.tests.test_mathtext',
'matplotlib.tests.test_mlab',
'matplotlib.tests.test_patches',
'matplotlib.tests.test_path',
'matplotlib.tests.test_patheffects',
'matplotlib.tests.test_pickle',
'matplotlib.tests.test_png',
'matplotlib.tests.test_rcparams',
'matplotlib.tests.test_scale',
'matplotlib.tests.test_simplification',
'matplotlib.tests.test_spines',
'matplotlib.tests.test_streamplot',
'matplotlib.tests.test_subplots',
'matplotlib.tests.test_table',
'matplotlib.tests.test_text',
'matplotlib.tests.test_ticker',
'matplotlib.tests.test_tightlayout',
'matplotlib.tests.test_transforms',
'matplotlib.tests.test_triangulation',
]
def test(verbosity=1):
"""run the matplotlib test suite"""
old_backend = rcParams['backend']
try:
use('agg')
import nose
import nose.plugins.builtin
from .testing.noseclasses import KnownFailure
from nose.plugins.manager import PluginManager
from nose.plugins import multiprocess
# store the old values before overriding
plugins = []
plugins.append( KnownFailure() )
plugins.extend( [plugin() for plugin in nose.plugins.builtin.plugins] )
manager = PluginManager(plugins=plugins)
config = nose.config.Config(verbosity=verbosity, plugins=manager)
# Nose doesn't automatically instantiate all of the plugins in the
# child processes, so we have to provide the multiprocess plugin with
# a list.
multiprocess._instantiate_plugins = [KnownFailure]
success = nose.run( defaultTest=default_test_modules,
config=config,
)
finally:
if old_backend.lower() != 'agg':
use(old_backend)
return success
test.__test__ = False # nose: this function is not a test
verbose.report('matplotlib version %s'%__version__)
verbose.report('verbose.level %s'%verbose.level)
verbose.report('interactive is %s'%rcParams['interactive'])
verbose.report('platform is %s'%sys.platform)
verbose.report('loaded modules: %s'%sys.modules.iterkeys(), 'debug')
| unlicense |
aestrivex/mne-python | mne/viz/misc.py | 13 | 19748 | """Functions to make simple plots with M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Cathy Nangini <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
import copy
import warnings
from glob import glob
import os.path as op
from itertools import cycle
import numpy as np
from scipy import linalg
from ..surface import read_surface
from ..io.proj import make_projector
from ..utils import logger, verbose, get_subjects_dir
from ..io.pick import pick_types
from .utils import tight_layout, COLORS, _prepare_trellis
@verbose
def plot_cov(cov, info, exclude=[], colorbar=True, proj=False, show_svd=True,
show=True, verbose=None):
"""Plot Covariance data
Parameters
----------
cov : instance of Covariance
The covariance matrix.
    info : dict
Measurement info.
exclude : list of string | str
List of channels to exclude. If empty do not exclude any channel.
If 'bads', exclude info['bads'].
colorbar : bool
Show colorbar or not.
proj : bool
Apply projections or not.
show_svd : bool
        Plot also the singular values of the noise covariance for each sensor
        type. Square roots are shown, i.e. standard deviations.
show : bool
Show figure if True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig_cov : instance of matplotlib.pyplot.Figure
The covariance plot.
fig_svd : instance of matplotlib.pyplot.Figure | None
The SVD spectra plot of the covariance.
"""
if exclude == 'bads':
exclude = info['bads']
ch_names = [n for n in cov.ch_names if n not in exclude]
ch_idx = [cov.ch_names.index(n) for n in ch_names]
info_ch_names = info['ch_names']
sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=exclude)
sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,
exclude=exclude)
sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,
exclude=exclude)
idx_eeg = [ch_names.index(info_ch_names[c])
for c in sel_eeg if info_ch_names[c] in ch_names]
idx_mag = [ch_names.index(info_ch_names[c])
for c in sel_mag if info_ch_names[c] in ch_names]
idx_grad = [ch_names.index(info_ch_names[c])
for c in sel_grad if info_ch_names[c] in ch_names]
idx_names = [(idx_eeg, 'EEG covariance', 'uV', 1e6),
(idx_grad, 'Gradiometers', 'fT/cm', 1e13),
(idx_mag, 'Magnetometers', 'fT', 1e15)]
idx_names = [(idx, name, unit, scaling)
for idx, name, unit, scaling in idx_names if len(idx) > 0]
C = cov.data[ch_idx][:, ch_idx]
if proj:
projs = copy.deepcopy(info['projs'])
# Activate the projection items
for p in projs:
p['active'] = True
P, ncomp, _ = make_projector(projs, ch_names)
if ncomp > 0:
logger.info(' Created an SSP operator (subspace dimension'
' = %d)' % ncomp)
C = np.dot(P, np.dot(C, P.T))
else:
logger.info(' The projection vectors do not apply to these '
'channels.')
import matplotlib.pyplot as plt
fig_cov = plt.figure(figsize=(2.5 * len(idx_names), 2.7))
for k, (idx, name, _, _) in enumerate(idx_names):
plt.subplot(1, len(idx_names), k + 1)
plt.imshow(C[idx][:, idx], interpolation="nearest", cmap='RdBu_r')
plt.title(name)
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)
tight_layout(fig=fig_cov)
fig_svd = None
if show_svd:
fig_svd = plt.figure()
for k, (idx, name, unit, scaling) in enumerate(idx_names):
s = linalg.svd(C[idx][:, idx], compute_uv=False)
plt.subplot(1, len(idx_names), k + 1)
plt.ylabel('Noise std (%s)' % unit)
plt.xlabel('Eigenvalue index')
plt.semilogy(np.sqrt(s) * scaling)
plt.title(name)
tight_layout(fig=fig_svd)
if show:
plt.show()
return fig_cov, fig_svd
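# Editor's note -- hypothetical usage sketch, not part of mne-python: given an
# already-computed noise Covariance ``cov`` and the matching measurement
# ``info`` (both are assumptions here, e.g. loaded from disk), a typical call
# would be
#
#     fig_cov, fig_svd = plot_cov(cov, info, exclude='bads', proj=True)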
def plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None,
source_index=None, colorbar=False, show=True):
"""Plot source power in time-freqency grid.
Parameters
----------
stcs : list of SourceEstimate
Source power for consecutive time windows, one SourceEstimate object
should be provided for each frequency bin.
freq_bins : list of tuples of float
Start and end points of frequency bins of interest.
tmin : float
Minimum time instant to show.
tmax : float
Maximum time instant to show.
source_index : int | None
Index of source for which the spectrogram will be plotted. If None,
the source with the largest activation will be selected.
colorbar : bool
If true, a colorbar will be added to the plot.
show : bool
Show figure if True.
"""
import matplotlib.pyplot as plt
# Input checks
if len(stcs) == 0:
raise ValueError('cannot plot spectrogram if len(stcs) == 0')
stc = stcs[0]
if tmin is not None and tmin < stc.times[0]:
raise ValueError('tmin cannot be smaller than the first time point '
'provided in stcs')
if tmax is not None and tmax > stc.times[-1] + stc.tstep:
raise ValueError('tmax cannot be larger than the sum of the last time '
'point and the time step, which are provided in stcs')
# Preparing time-frequency cell boundaries for plotting
if tmin is None:
tmin = stc.times[0]
if tmax is None:
tmax = stc.times[-1] + stc.tstep
time_bounds = np.arange(tmin, tmax + stc.tstep, stc.tstep)
freq_bounds = sorted(set(np.ravel(freq_bins)))
freq_ticks = copy.deepcopy(freq_bounds)
# Reject time points that will not be plotted and gather results
source_power = []
for stc in stcs:
stc = stc.copy() # copy since crop modifies inplace
stc.crop(tmin, tmax - stc.tstep)
source_power.append(stc.data)
source_power = np.array(source_power)
# Finding the source with maximum source power
if source_index is None:
source_index = np.unravel_index(source_power.argmax(),
source_power.shape)[1]
    # If there are gaps between the frequency bins, record their locations so
    # that they can be covered with gray horizontal bars
gap_bounds = []
for i in range(len(freq_bins) - 1):
lower_bound = freq_bins[i][1]
upper_bound = freq_bins[i + 1][0]
if lower_bound != upper_bound:
freq_bounds.remove(lower_bound)
gap_bounds.append((lower_bound, upper_bound))
# Preparing time-frequency grid for plotting
time_grid, freq_grid = np.meshgrid(time_bounds, freq_bounds)
# Plotting the results
fig = plt.figure(figsize=(9, 6))
plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :],
cmap='Reds')
ax = plt.gca()
plt.title('Time-frequency source power')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
time_tick_labels = [str(np.round(t, 2)) for t in time_bounds]
n_skip = 1 + len(time_bounds) // 10
for i in range(len(time_bounds)):
if i % n_skip != 0:
time_tick_labels[i] = ''
ax.set_xticks(time_bounds)
ax.set_xticklabels(time_tick_labels)
plt.xlim(time_bounds[0], time_bounds[-1])
plt.yscale('log')
ax.set_yticks(freq_ticks)
ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks])
plt.ylim(freq_bounds[0], freq_bounds[-1])
plt.grid(True, ls='-')
if colorbar:
plt.colorbar()
tight_layout(fig=fig)
# Covering frequency gaps with horizontal bars
for lower_bound, upper_bound in gap_bounds:
plt.barh(lower_bound, time_bounds[-1] - time_bounds[0], upper_bound -
lower_bound, time_bounds[0], color='#666666')
if show:
plt.show()
return fig
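# Illustrative sketch added for clarity (not part of the original module): the
# ``stcs`` argument only needs objects exposing the small interface used by
# plot_source_spectrogram above (``times``, ``tstep``, ``data``, ``copy`` and
# ``crop``). The stand-in class below is hypothetical and filled with random
# data for two frequency bins; with real data you would pass one source
# estimate per frequency bin instead.
def _example_plot_source_spectrogram():
    class _StcStub(object):
        """Minimal stand-in mimicking the source-estimate API used above."""
        def __init__(self, data, tmin, tstep):
            self.data = data
            self.tstep = tstep
            self.times = tmin + tstep * np.arange(data.shape[1])
        def copy(self):
            return _StcStub(self.data.copy(), self.times[0], self.tstep)
        def crop(self, tmin, tmax):
            keep = ((self.times >= tmin - 1e-9) & (self.times <= tmax + 1e-9))
            self.data = self.data[:, keep]
            self.times = self.times[keep]
    rng = np.random.RandomState(0)
    stcs = [_StcStub(rng.rand(10, 20), tmin=0., tstep=0.1) for _ in range(2)]
    freq_bins = [(4., 8.), (8., 12.)]  # one source estimate per bin
    return plot_source_spectrogram(stcs, freq_bins, show=False)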
def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
slices=None, show=True):
"""Plot BEM contours on anatomical slices.
Parameters
----------
mri_fname : str
The name of the file containing anatomical data.
surf_fnames : list of str
The filenames for the BEM surfaces in the format
['inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf'].
orientation : str
'coronal' or 'axial' or 'sagittal'
slices : list of int
Slice indices.
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
"""
import matplotlib.pyplot as plt
import nibabel as nib
if orientation not in ['coronal', 'axial', 'sagittal']:
raise ValueError("Orientation must be 'coronal', 'axial' or "
"'sagittal'. Got %s." % orientation)
# Load the T1 data
nim = nib.load(mri_fname)
data = nim.get_data()
affine = nim.get_affine()
n_sag, n_axi, n_cor = data.shape
orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
orientation_axis = orientation_name2axis[orientation]
if slices is None:
n_slices = data.shape[orientation_axis]
slices = np.linspace(0, n_slices, 12, endpoint=False).astype(np.int)
# create of list of surfaces
surfs = list()
trans = linalg.inv(affine)
# XXX : next line is a hack don't ask why
trans[:3, -1] = [n_sag // 2, n_axi // 2, n_cor // 2]
for surf_fname in surf_fnames:
surf = dict()
surf['rr'], surf['tris'] = read_surface(surf_fname)
# move back surface to MRI coordinate system
surf['rr'] = nib.affines.apply_affine(trans, surf['rr'])
surfs.append(surf)
fig, axs = _prepare_trellis(len(slices), 4)
for ax, sl in zip(axs, slices):
# adjust the orientations for good view
if orientation == 'coronal':
dat = data[:, :, sl].transpose()
elif orientation == 'axial':
dat = data[:, sl, :]
elif orientation == 'sagittal':
dat = data[sl, :, :]
# First plot the anatomical data
ax.imshow(dat, cmap=plt.cm.gray)
ax.axis('off')
# and then plot the contours on top
for surf in surfs:
if orientation == 'coronal':
ax.tricontour(surf['rr'][:, 0], surf['rr'][:, 1],
surf['tris'], surf['rr'][:, 2],
levels=[sl], colors='yellow', linewidths=2.0)
elif orientation == 'axial':
ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 0],
surf['tris'], surf['rr'][:, 1],
levels=[sl], colors='yellow', linewidths=2.0)
elif orientation == 'sagittal':
ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 1],
surf['tris'], surf['rr'][:, 0],
levels=[sl], colors='yellow', linewidths=2.0)
plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
hspace=0.)
if show:
plt.show()
return fig
def plot_bem(subject=None, subjects_dir=None, orientation='coronal',
slices=None, show=True):
"""Plot BEM contours on anatomical slices.
Parameters
----------
subject : str
Subject name.
subjects_dir : str | None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
orientation : str
'coronal' or 'axial' or 'sagittal'.
slices : list of int
Slice indices.
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
# Get the MRI filename
mri_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
if not op.isfile(mri_fname):
raise IOError('MRI file "%s" does not exist' % mri_fname)
# Get the BEM surface filenames
bem_path = op.join(subjects_dir, subject, 'bem')
if not op.isdir(bem_path):
raise IOError('Subject bem directory "%s" does not exist' % bem_path)
surf_fnames = []
for surf_name in ['*inner_skull', '*outer_skull', '*outer_skin']:
surf_fname = glob(op.join(bem_path, surf_name + '.surf'))
if len(surf_fname) > 0:
surf_fname = surf_fname[0]
logger.info("Using surface: %s" % surf_fname)
surf_fnames.append(surf_fname)
if len(surf_fnames) == 0:
raise IOError('No surface files found. Surface files must end with '
'inner_skull.surf, outer_skull.surf or outer_skin.surf')
# Plot the contours
return _plot_mri_contours(mri_fname, surf_fnames, orientation=orientation,
slices=slices, show=show)
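# Illustrative sketch added for clarity (not part of the original module): a
# typical call plots a handful of coronal slices for a FreeSurfer subject.
# The subject name and SUBJECTS_DIR below are placeholders -- point them at
# your own FreeSurfer reconstruction before running this.
def _example_plot_bem():
    subjects_dir = '/path/to/subjects_dir'  # hypothetical location
    return plot_bem(subject='sample', subjects_dir=subjects_dir,
                    orientation='coronal', slices=[60, 100, 140, 180],
                    show=False)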
def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None,
axes=None, equal_spacing=True, show=True):
"""Plot events to get a visual display of the paradigm
Parameters
----------
events : array, shape (n_events, 3)
The events.
sfreq : float | None
The sample frequency. If None, data will be displayed in samples (not
seconds).
first_samp : int
The index of the first sample. Typically the raw.first_samp
attribute. It is needed for recordings on a Neuromag
system as the events are defined relative to the system
start and not to the beginning of the recording.
color : dict | None
Dictionary of event_id value and its associated color. If None,
        colors are automatically drawn from a default list (cycled through
        if there are more events than default colors).
event_id : dict | None
Dictionary of event label (e.g. 'aud_l') and its associated
event_id value. Label used to plot a legend. If None, no legend is
drawn.
axes : instance of matplotlib.axes.AxesSubplot
The subplot handle.
equal_spacing : bool
Use equal spacing between events in y-axis.
show : bool
Show figure if True.
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
.. versionadded:: 0.9.0
"""
if sfreq is None:
sfreq = 1.0
xlabel = 'samples'
else:
xlabel = 'Time (s)'
events = np.asarray(events)
unique_events = np.unique(events[:, 2])
if event_id is not None:
# get labels and unique event ids from event_id dict,
# sorted by value
event_id_rev = dict((v, k) for k, v in event_id.items())
conditions, unique_events_id = zip(*sorted(event_id.items(),
key=lambda x: x[1]))
for this_event in unique_events_id:
if this_event not in unique_events:
raise ValueError('%s from event_id is not present in events.'
% this_event)
for this_event in unique_events:
if this_event not in unique_events_id:
warnings.warn('event %s missing from event_id will be ignored.'
% this_event)
else:
unique_events_id = unique_events
if color is None:
if len(unique_events) > len(COLORS):
warnings.warn('More events than colors available. '
'You should pass a list of unique colors.')
colors = cycle(COLORS)
color = dict()
for this_event, this_color in zip(unique_events_id, colors):
color[this_event] = this_color
else:
for this_event in color:
if this_event not in unique_events_id:
raise ValueError('%s from color is not present in events '
'or event_id.' % this_event)
for this_event in unique_events_id:
if this_event not in color:
warnings.warn('Color is not available for event %d. Default '
'colors will be used.' % this_event)
import matplotlib.pyplot as plt
fig = None
if axes is None:
fig = plt.figure()
ax = axes if axes else plt.gca()
unique_events_id = np.array(unique_events_id)
min_event = np.min(unique_events_id)
max_event = np.max(unique_events_id)
for idx, ev in enumerate(unique_events_id):
ev_mask = events[:, 2] == ev
kwargs = {}
if event_id is not None:
event_label = '{0} ({1})'.format(event_id_rev[ev],
np.sum(ev_mask))
kwargs['label'] = event_label
if ev in color:
kwargs['color'] = color[ev]
if equal_spacing:
ax.plot((events[ev_mask, 0] - first_samp) / sfreq,
(idx + 1) * np.ones(ev_mask.sum()), '.', **kwargs)
else:
ax.plot((events[ev_mask, 0] - first_samp) / sfreq,
events[ev_mask, 2], '.', **kwargs)
if equal_spacing:
ax.set_ylim(0, unique_events_id.size + 1)
ax.set_yticks(1 + np.arange(unique_events_id.size))
ax.set_yticklabels(unique_events_id)
else:
ax.set_ylim([min_event - 1, max_event + 1])
ax.set_xlabel(xlabel)
ax.set_ylabel('Events id')
ax.grid('on')
fig = fig if fig is not None else plt.gcf()
if event_id is not None:
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
fig.canvas.draw()
if show:
plt.show()
return fig
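# Illustrative sketch added for clarity (not part of the original module):
# build a small synthetic events array -- rows of (sample, previous id,
# event id) -- and plot it with labelled conditions. The sampling frequency
# and event labels are made up for the example.
def _example_plot_events():
    samples = 1000 + 500 * np.arange(10)
    ids = np.tile([1, 2], 5)  # alternate between two conditions
    events = np.c_[samples, np.zeros(10, int), ids]
    return plot_events(events, sfreq=1000.,
                       event_id=dict(aud_l=1, vis_r=2), show=False)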
def _get_presser(fig):
"""Helper to get our press callback"""
callbacks = fig.canvas.callbacks.callbacks['button_press_event']
func = None
for key, val in callbacks.items():
if val.func.__class__.__name__ == 'partial':
func = val.func
break
assert func is not None
return func
def plot_dipole_amplitudes(dipoles, colors=None, show=True):
"""Plot the amplitude traces of a set of dipoles
Parameters
----------
dipoles : list of instance of Dipoles
The dipoles whose amplitudes should be shown.
    colors : list of colors | None
        Colors to plot with each dipole. If None, default colors are used.
show : bool
Show figure if True.
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
.. versionadded:: 0.9.0
"""
import matplotlib.pyplot as plt
if colors is None:
colors = cycle(COLORS)
fig, ax = plt.subplots(1, 1)
xlim = [np.inf, -np.inf]
for dip, color in zip(dipoles, colors):
ax.plot(dip.times, dip.amplitude, color=color, linewidth=1.5)
xlim[0] = min(xlim[0], dip.times[0])
xlim[1] = max(xlim[1], dip.times[-1])
ax.set_xlim(xlim)
ax.set_xlabel('Time (sec)')
ax.set_ylabel('Amplitude (nAm)')
if show:
fig.show()
return fig
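# Illustrative sketch added for clarity (not part of the original module):
# ``dipoles`` only needs objects carrying ``times`` and ``amplitude``
# attributes (normally Dipole instances). A hypothetical stand-in with fake
# amplitude traces is enough to exercise the plotting code.
def _example_plot_dipole_amplitudes():
    from collections import namedtuple
    FakeDipole = namedtuple('FakeDipole', ('times', 'amplitude'))
    times = np.linspace(0., 0.5, 100)
    dipoles = [FakeDipole(times, np.abs(np.sin(2 * np.pi * f * times)) * 1e-8)
               for f in (5., 10.)]
    return plot_dipole_amplitudes(dipoles, show=False)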
| bsd-3-clause |
Eric89GXL/scikit-learn | benchmarks/bench_lasso.py | 297 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix the number of features and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
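# Added usage sketch (not in the original benchmark): a single small run of
# compute_bench, handy as a smoke test before launching the full sweeps in
# the __main__ block below. The problem size and alpha are arbitrary.
def quick_check(alpha=0.01):
    # compute_bench looks up Lasso/LassoLars as module-level names, so mirror
    # the deferred import done in the __main__ block.
    global Lasso, LassoLars
    from sklearn.linear_model import Lasso, LassoLars
    lasso_t, lars_t = compute_bench(alpha, [500], [100], precompute=True)
    print('Lasso: %.3f s, LassoLars: %.3f s' % (lasso_t[0], lars_t[0]))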
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
samuel1208/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 216 | 8091 | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
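# Added illustration (not part of the original test module): eigenvectors --
# and therefore spectral embeddings -- are only defined up to a sign per
# column, which is exactly the invariance the helper above encodes. A copy
# with one column negated should still compare equal.
def _example_sign_flip_invariance():
    rng = np.random.RandomState(0)
    A = rng.rand(20, 3)
    B = A.copy()
    B[:, 1] *= -1  # flip the sign of one "eigenvector"
    assert _check_with_col_sign_flipping(A, B, tol=1e-12)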
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralClustering fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
# Test that SpectralClustering fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
| bsd-3-clause |
anderspitman/scikit-bio | skbio/stats/ordination/_canonical_correspondence_analysis.py | 1 | 8276 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
import pandas as pd
from scipy.linalg import svd, lstsq
from skbio._base import OrdinationResults
from ._utils import corr, svd_rank, scale
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def cca(y, x, scaling=1):
r"""Compute canonical (also known as constrained) correspondence
analysis.
Canonical (or constrained) correspondence analysis is a
multivariate ordination technique. It appeared in community
ecology [1]_ and relates community composition to the variation in
the environment (or in other factors). It works from data on
abundances or counts of samples and constraints variables,
and outputs ordination axes that maximize sample separation among species.
It is better suited to extract the niches of taxa than linear
multivariate methods because it assumes unimodal response curves
(habitat preferences are often unimodal functions of habitat
variables [2]_).
As more environmental variables are added, the result gets more
similar to unconstrained ordination, so only the variables that
are deemed explanatory should be included in the analysis.
Parameters
----------
y : DataFrame
Samples by features table (n, m)
x : DataFrame
Samples by constraints table (n, q)
scaling : int, {1, 2}, optional
Scaling type 1 maintains :math:`\chi^2` distances between rows.
        Scaling type 2 preserves :math:`\chi^2` distances between columns.
For a more detailed explanation of the interpretation, check Legendre &
Legendre 1998, section 9.4.3.
Returns
-------
OrdinationResults
Object that stores the cca results.
Raises
------
ValueError
If `x` and `y` have different number of rows
If `y` contains negative values
If `y` contains a row of only 0's.
NotImplementedError
If scaling is not 1 or 2.
Notes
-----
The algorithm is based on [3]_, \S 11.2, and is expected to give
the same results as ``cca(y, x)`` in R's package vegan, except
that this implementation won't drop constraining variables due to
perfect collinearity: the user needs to choose which ones to
input.
Canonical *correspondence* analysis shouldn't be confused with
canonical *correlation* analysis (CCorA, but sometimes called
CCA), a different technique to search for multivariate
relationships between two datasets. Canonical correlation analysis
is a statistical tool that, given two vectors of random variables,
finds linear combinations that have maximum correlation with each
other. In some sense, it assumes linear responses of "species" to
"environmental variables" and is not well suited to analyze
ecological data.
See Also
--------
ca
rda
OrdinationResults
References
----------
.. [1] Cajo J. F. Ter Braak, "Canonical Correspondence Analysis: A
New Eigenvector Technique for Multivariate Direct Gradient
Analysis", Ecology 67.5 (1986), pp. 1167-1179.
.. [2] Cajo J.F. Braak and Piet F.M. Verdonschot, "Canonical
correspondence analysis and related multivariate methods in
aquatic ecology", Aquatic Sciences 57.3 (1995), pp. 255-289.
.. [3] Legendre P. and Legendre L. 1998. Numerical
Ecology. Elsevier, Amsterdam.
"""
Y = y.as_matrix()
X = x.as_matrix()
# Perform parameter sanity checks
if X.shape[0] != Y.shape[0]:
raise ValueError("The samples by features table 'y' and the samples by"
" constraints table 'x' must have the same number of "
" rows. 'y': {0} 'x': {1}".format(X.shape[0],
Y.shape[0]))
if Y.min() < 0:
raise ValueError(
"The samples by features table 'y' must be nonnegative")
row_max = Y.max(axis=1)
if np.any(row_max <= 0):
# Or else the lstsq call to compute Y_hat breaks
raise ValueError("The samples by features table 'y' cannot contain a "
"row with only 0's")
if scaling not in {1, 2}:
raise NotImplementedError(
"Scaling {0} not implemented.".format(scaling))
# Step 1 (similar to Pearson chi-square statistic)
grand_total = Y.sum()
Q = Y / grand_total # Relative frequencies of Y (contingency table)
# Features and sample weights (marginal totals)
column_marginals = Q.sum(axis=0)
row_marginals = Q.sum(axis=1)
    # Formula 9.32 in Legendre & Legendre (1998). Notice that it's a
    # scaled version of the contribution of each cell towards the Pearson
# chi-square statistic.
expected = np.outer(row_marginals, column_marginals)
Q_bar = (Q - expected) / np.sqrt(expected)
# Step 2. Standardize columns of X with respect to sample weights,
# using the maximum likelihood variance estimator (Legendre &
# Legendre 1998, p. 595)
X = scale(X, weights=row_marginals, ddof=0)
# Step 3. Weighted multiple regression.
X_weighted = row_marginals[:, None]**0.5 * X
B, _, rank_lstsq, _ = lstsq(X_weighted, Q_bar)
Y_hat = X_weighted.dot(B)
Y_res = Q_bar - Y_hat
# Step 4. Eigenvalue decomposition
u, s, vt = svd(Y_hat, full_matrices=False)
rank = svd_rank(Y_hat.shape, s)
s = s[:rank]
u = u[:, :rank]
vt = vt[:rank]
U = vt.T
# Step 5. Eq. 9.38
U_hat = Q_bar.dot(U) * s**-1
# Residuals analysis
u_res, s_res, vt_res = svd(Y_res, full_matrices=False)
rank = svd_rank(Y_res.shape, s_res)
s_res = s_res[:rank]
u_res = u_res[:, :rank]
vt_res = vt_res[:rank]
U_res = vt_res.T
U_hat_res = Y_res.dot(U_res) * s_res**-1
eigenvalues = np.r_[s, s_res]**2
# Scalings (p. 596 L&L 1998):
# feature scores, scaling 1
V = (column_marginals**-0.5)[:, None] * U
# sample scores, scaling 2
V_hat = (row_marginals**-0.5)[:, None] * U_hat
# sample scores, scaling 1
F = V_hat * s
# feature scores, scaling 2
F_hat = V * s
# Sample scores which are linear combinations of constraint
# variables
Z_scaling1 = ((row_marginals**-0.5)[:, None] *
Y_hat.dot(U))
Z_scaling2 = Z_scaling1 * s**-1
# Feature residual scores, scaling 1
V_res = (column_marginals**-0.5)[:, None] * U_res
# Sample residual scores, scaling 2
V_hat_res = (row_marginals**-0.5)[:, None] * U_hat_res
# Sample residual scores, scaling 1
F_res = V_hat_res * s_res
# Feature residual scores, scaling 2
F_hat_res = V_res * s_res
eigvals = eigenvalues
if scaling == 1:
features_scores = np.hstack((V, V_res))
sample_scores = np.hstack((F, F_res))
sample_constraints = np.hstack((Z_scaling1, F_res))
elif scaling == 2:
features_scores = np.hstack((F_hat, F_hat_res))
sample_scores = np.hstack((V_hat, V_hat_res))
sample_constraints = np.hstack((Z_scaling2, V_hat_res))
biplot_scores = corr(X_weighted, u)
pc_ids = ['CCA%d' % (i+1) for i in range(len(eigenvalues))]
sample_ids = y.index
feature_ids = y.columns
eigvals = pd.Series(eigenvalues, index=pc_ids)
samples = pd.DataFrame(sample_scores,
columns=pc_ids, index=sample_ids)
features = pd.DataFrame(features_scores,
columns=pc_ids, index=feature_ids)
biplot_scores = pd.DataFrame(biplot_scores)
sample_constraints = pd.DataFrame(sample_constraints,
index=sample_ids, columns=pc_ids)
return OrdinationResults(
"CCA", "Canonical Correspondence Analysis", eigvals, samples,
features=features, biplot_scores=biplot_scores,
sample_constraints=sample_constraints,
proportion_explained=eigvals / eigvals.sum())
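# Added usage sketch (not part of the original module): a tiny community
# table with three samples, four taxa and two environmental constraints.
# All numbers are made up purely to show the expected input layout.
def _example_cca():
    y = pd.DataFrame([[1, 0, 3, 2],
                      [0, 2, 1, 4],
                      [2, 1, 0, 1]],
                     index=['site1', 'site2', 'site3'],
                     columns=['sp1', 'sp2', 'sp3', 'sp4'])
    x = pd.DataFrame([[1.0, 0.2],
                      [2.0, 0.5],
                      [3.0, 0.1]],
                     index=y.index, columns=['depth', 'ph'])
    return cca(y, x, scaling=1)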
| bsd-3-clause |
magne-max/zipline-ja | zipline/finance/controls.py | 1 | 12876 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pandas as pd
from six import with_metaclass
from zipline.errors import (
AccountControlViolation,
TradingControlViolation,
)
class TradingControl(with_metaclass(abc.ABCMeta)):
"""
Abstract base class representing a fail-safe control on the behavior of any
algorithm.
"""
def __init__(self, **kwargs):
"""
Track any arguments that should be printed in the error message
generated by self.fail.
"""
self.__fail_args = kwargs
@abc.abstractmethod
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Before any order is executed by TradingAlgorithm, this method should be
called *exactly once* on each registered TradingControl object.
If the specified asset and amount do not violate this TradingControl's
restraint given the information in `portfolio`, this method should
return None and have no externally-visible side-effects.
        If the desired order violates this TradingControl's constraint, this
method should call self.fail(asset, amount).
"""
raise NotImplementedError
def fail(self, asset, amount, datetime, metadata=None):
"""
Raise a TradingControlViolation with information about the failure.
If dynamic information should be displayed as well, pass it in via
`metadata`.
"""
constraint = repr(self)
if metadata:
constraint = "{constraint} (Metadata: {metadata})".format(
constraint=constraint,
metadata=metadata
)
raise TradingControlViolation(asset=asset,
amount=amount,
datetime=datetime,
constraint=constraint)
def __repr__(self):
return "{name}({attrs})".format(name=self.__class__.__name__,
attrs=self.__fail_args)
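# Added illustration (hypothetical control, not part of zipline): subclassing
# TradingControl only requires implementing ``validate`` and calling
# ``self.fail`` when the constraint is violated. This sketch rejects orders
# for assets trading above a maximum price.
class MaxAssetPrice(TradingControl):
    """Hypothetical example control: forbid orders when the asset's current
    price exceeds ``max_price``."""
    def __init__(self, max_price):
        super(MaxAssetPrice, self).__init__(max_price=max_price)
        self.max_price = max_price
    def validate(self,
                 asset,
                 amount,
                 portfolio,
                 algo_datetime,
                 algo_current_data):
        price = algo_current_data.current(asset, "price")
        if price > self.max_price:
            self.fail(asset, amount, algo_datetime, metadata={'price': price})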
class MaxOrderCount(TradingControl):
"""
TradingControl representing a limit on the number of orders that can be
placed in a given trading day.
"""
def __init__(self, max_count):
super(MaxOrderCount, self).__init__(max_count=max_count)
self.orders_placed = 0
self.max_count = max_count
self.current_date = None
def validate(self,
asset,
amount,
_portfolio,
algo_datetime,
_algo_current_data):
"""
Fail if we've already placed self.max_count orders today.
"""
algo_date = algo_datetime.date()
# Reset order count if it's a new day.
if self.current_date and self.current_date != algo_date:
self.orders_placed = 0
self.current_date = algo_date
if self.orders_placed >= self.max_count:
self.fail(asset, amount, algo_datetime)
self.orders_placed += 1
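# Added usage sketch (not part of zipline): the control is driven by one
# ``validate`` call per order. With max_count=2 the third order on the same
# day raises TradingControlViolation, and the counter resets on a new date.
# The asset string and timestamps are placeholders.
def _example_max_order_count():
    control = MaxOrderCount(max_count=2)
    day1 = pd.Timestamp('2014-01-02 10:00', tz='UTC')
    control.validate('AAPL', 10, None, day1, None)      # ok, first order
    control.validate('AAPL', 10, None, day1, None)      # ok, second order
    try:
        control.validate('AAPL', 10, None, day1, None)  # third order fails
    except TradingControlViolation:
        pass
    day2 = pd.Timestamp('2014-01-03 10:00', tz='UTC')
    control.validate('AAPL', 10, None, day2, None)      # counter was reset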
class RestrictedListOrder(TradingControl):
"""TradingControl representing a restricted list of assets that
cannot be ordered by the algorithm.
Parameters
----------
restricted_list : container[Asset]
The assets that cannot be ordered.
"""
def __init__(self, restricted_list):
super(RestrictedListOrder, self).__init__()
self.restricted_list = restricted_list
def validate(self,
asset,
amount,
_portfolio,
_algo_datetime,
_algo_current_data):
"""
Fail if the asset is in the restricted_list.
"""
if asset in self.restricted_list:
self.fail(asset, amount, _algo_datetime)
class MaxOrderSize(TradingControl):
"""
TradingControl representing a limit on the magnitude of any single order
placed with the given asset. Can be specified by share or by dollar
value.
"""
def __init__(self, asset=None, max_shares=None, max_notional=None):
super(MaxOrderSize, self).__init__(asset=asset,
max_shares=max_shares,
max_notional=max_notional)
self.asset = asset
self.max_shares = max_shares
self.max_notional = max_notional
if max_shares is None and max_notional is None:
raise ValueError(
"Must supply at least one of max_shares and max_notional"
)
if max_shares and max_shares < 0:
raise ValueError(
"max_shares cannot be negative."
)
if max_notional and max_notional < 0:
raise ValueError(
"max_notional must be positive."
)
def validate(self,
asset,
amount,
portfolio,
_algo_datetime,
algo_current_data):
"""
Fail if the magnitude of the given order exceeds either self.max_shares
or self.max_notional.
"""
if self.asset is not None and self.asset != asset:
return
if self.max_shares is not None and abs(amount) > self.max_shares:
self.fail(asset, amount, _algo_datetime)
current_asset_price = algo_current_data.current(asset, "price")
order_value = amount * current_asset_price
too_much_value = (self.max_notional is not None and
abs(order_value) > self.max_notional)
if too_much_value:
self.fail(asset, amount, _algo_datetime)
class MaxPositionSize(TradingControl):
"""
TradingControl representing a limit on the maximum position size that can
be held by an algo for a given asset.
"""
def __init__(self, asset=None, max_shares=None, max_notional=None):
super(MaxPositionSize, self).__init__(asset=asset,
max_shares=max_shares,
max_notional=max_notional)
self.asset = asset
self.max_shares = max_shares
self.max_notional = max_notional
if max_shares is None and max_notional is None:
raise ValueError(
"Must supply at least one of max_shares and max_notional"
)
if max_shares and max_shares < 0:
raise ValueError(
"max_shares cannot be negative."
)
if max_notional and max_notional < 0:
raise ValueError(
"max_notional must be positive."
)
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the given order would cause the magnitude of our position to be
greater in shares than self.max_shares or greater in dollar value than
self.max_notional.
"""
if self.asset is not None and self.asset != asset:
return
current_share_count = portfolio.positions[asset].amount
shares_post_order = current_share_count + amount
too_many_shares = (self.max_shares is not None and
abs(shares_post_order) > self.max_shares)
if too_many_shares:
self.fail(asset, amount, algo_datetime)
current_price = algo_current_data.current(asset, "price")
value_post_order = shares_post_order * current_price
too_much_value = (self.max_notional is not None and
abs(value_post_order) > self.max_notional)
if too_much_value:
self.fail(asset, amount, algo_datetime)
class LongOnly(TradingControl):
"""
TradingControl representing a prohibition against holding short positions.
"""
def validate(self,
asset,
amount,
portfolio,
_algo_datetime,
_algo_current_data):
"""
Fail if we would hold negative shares of asset after completing this
order.
"""
if portfolio.positions[asset].amount + amount < 0:
self.fail(asset, amount, _algo_datetime)
class AssetDateBounds(TradingControl):
"""
TradingControl representing a prohibition against ordering an asset before
its start_date, or after its end_date.
"""
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the algo has passed this Asset's end_date, or before the
Asset's start date.
"""
# If the order is for 0 shares, then silently pass through.
if amount == 0:
return
normalized_algo_dt = pd.Timestamp(algo_datetime).normalize()
# Fail if the algo is before this Asset's start_date
if asset.start_date:
normalized_start = pd.Timestamp(asset.start_date).normalize()
if normalized_algo_dt < normalized_start:
metadata = {
'asset_start_date': normalized_start
}
self.fail(asset, amount, algo_datetime, metadata=metadata)
# Fail if the algo has passed this Asset's end_date
if asset.end_date:
normalized_end = pd.Timestamp(asset.end_date).normalize()
if normalized_algo_dt > normalized_end:
metadata = {
'asset_end_date': normalized_end
}
self.fail(asset, amount, algo_datetime, metadata=metadata)
class AccountControl(with_metaclass(abc.ABCMeta)):
"""
Abstract base class representing a fail-safe control on the behavior of any
algorithm.
"""
def __init__(self, **kwargs):
"""
Track any arguments that should be printed in the error message
generated by self.fail.
"""
self.__fail_args = kwargs
@abc.abstractmethod
def validate(self,
_portfolio,
_account,
_algo_datetime,
_algo_current_data):
"""
On each call to handle data by TradingAlgorithm, this method should be
called *exactly once* on each registered AccountControl object.
If the check does not violate this AccountControl's restraint given
the information in `portfolio` and `account`, this method should
return None and have no externally-visible side-effects.
        If the check violates this AccountControl's constraint, this
method should call self.fail().
"""
raise NotImplementedError
def fail(self):
"""
Raise an AccountControlViolation with information about the failure.
"""
raise AccountControlViolation(constraint=repr(self))
def __repr__(self):
return "{name}({attrs})".format(name=self.__class__.__name__,
attrs=self.__fail_args)
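# Added illustration (hypothetical control, not part of zipline): account
# controls follow the same pattern as trading controls but inspect the whole
# account rather than a single order. This sketch assumes the account object
# exposes a ``settled_cash`` attribute and fails when it drops below a
# minimum.
class MinCash(AccountControl):
    """Hypothetical example control: fail when settled cash falls below
    ``min_cash``."""
    def __init__(self, min_cash):
        super(MinCash, self).__init__(min_cash=min_cash)
        self.min_cash = min_cash
    def validate(self,
                 _portfolio,
                 _account,
                 _algo_datetime,
                 _algo_current_data):
        if _account.settled_cash < self.min_cash:
            self.fail()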
class MaxLeverage(AccountControl):
"""
AccountControl representing a limit on the maximum leverage allowed
by the algorithm.
"""
def __init__(self, max_leverage):
"""
max_leverage is the gross leverage in decimal form. For example,
2, limits an algorithm to trading at most double the account value.
"""
super(MaxLeverage, self).__init__(max_leverage=max_leverage)
self.max_leverage = max_leverage
if max_leverage is None:
raise ValueError(
"Must supply max_leverage"
)
if max_leverage < 0:
raise ValueError(
"max_leverage must be positive"
)
def validate(self,
_portfolio,
_account,
_algo_datetime,
_algo_current_data):
"""
Fail if the leverage is greater than the allowed leverage.
"""
if _account.leverage > self.max_leverage:
self.fail()
| apache-2.0 |
peri-source/peri | scripts/test_genmodel.py | 1 | 8048 |
# In this script, we use several demo images to check the quality of our
# generative model. We'll start with the simplest possible image -- a blank
# ``image`` taken without a sample -- and gradually move up in complexity
# to a real microscope image.
import numpy as np
import matplotlib.pyplot as plt
from peri import states
from peri import models
from peri import util
from peri.comp import ilms, objs, exactpsf, comp
import peri.opt.optimize as opt
from peri.viz.interaction import * # OrthoViewer & OrthoManipulator
# We start with featuring just a background image
# This image was taken with no sample, i.e. we're just measuring dark current
im_bkg = util.RawImage('./bkg_test.tif') # located in the scripts folder
# First we try with just a constant background
bkg_const = ilms.LegendrePoly3D(order=(1,1,1))
# Since we're just fitting a blank image, we don't need a psf at first, so we
# use the simplest model for the state: a SmoothFieldModel, which has just
# returns the illumination field:
st = states.ImageState(im_bkg, [bkg_const], mdl=models.SmoothFieldModel())
opt.do_levmarq(st, st.params)
# Since there's not a whole lot to see in this image, looking at the
# OrthoViewer or OrthoManipulator doesn't provide a lot of insight. Instead,
# we look at plots of the residuals along certain axes. We'll do this several
# times so I'll make a function:
def plot_averaged_residuals(st):
plt.figure(figsize=[15,6])
for i in range(3):
plt.subplot(1,3,1 + i)
mean_ax = tuple({0,1,2} - {i}) # which 2 directions to average over
plt.plot(st.residuals.mean(axis=mean_ax))
plt.title('{}-averaged'.format(['$xy$', '$xz$', '$yz$'][i]),
fontsize='large')
plot_averaged_residuals(st)
# From this we see that, while the background doesn't change much along z, it
# increases smoothly along y (the in-plane direction perpendicular to our line
# illumination; probably due to changing dwell times during the scan), and it
# changes in a bumpy manner along x (the direction of the line). This suggests
# we use a higher-order background -- perhaps cubic in y and high-order in
# x to capture the oscillations
bkg_vry = ilms.LegendrePoly3D(order=(1,3,5))
st.set('ilm', bkg_vry)
opt.do_levmarq(st, st.params)
# Looking at the plot of the residuals again shows a significant improvement
# in the residuals:
plot_averaged_residuals(st)
# Next, let's check the illumination field. For this, we load a different
# image, one that I've taken of just dyed fluid. This image also has a
# coverslip in it, at the bottom. For now, we'll ignore this coverslip by
# setting the tile to be a specific region of z in the image. Moreover,
# since I know that our confocal has some scan issues at the edges of the
# image, I'll also crop out the image edges with the tile:
im_ilm = util.RawImage('./ilm_test.tif', tile=util.Tile([48,0,0], [49,100,100]))
# also located in the scripts folder
# Looking at the image, the illumination is very stripey, due to the line-scan
# nature of our confocal. To account for this, we use a stripe-based ilm:
ilm = ilms.BarnesStreakLegPoly2P1D(npts=(50, 30, 20, 13, 7, 7, 7), zorder=1)
# (we only use a zorder of 1 since we've truncated to 1 pixel in z).
# Our real model will use a point-spread function that will blur out the ilm
# field slightly more. So we check the fit with a model that includes the
# type of point-spread function that we will use. A model that blur with a
# point-spread function takes considerably more time to evaluate than a
# SmoothFieldModel, so if you're not sure if your ilm is high enough order
# you should first check with a faster SmoothFieldModel.
psf = exactpsf.FixedSSChebLinePSF()
st = states.ImageState(im_ilm, [ilm, psf], mdl=models.BlurredFieldModel())
opt.do_levmarq(st, st.params)
# Plotting the residuals shows that they're good, aside from scan noise
# inherent to the line CCD camera:
plot_averaged_residuals(st)
# Next, we include the coverslip slide. To do this we first re-set the tile on
# our raw image to the full image:
im_ilm.set_tile(util.Tile([0,0,0], [60, 100, 100]))
# We then create a coverslip object:
slab = objs.Slab(zpos=35.0, category='obj')
# We also need our illumination to have a z-dependence now. Since we already
# spent time updating the ilm parameters, we update the corresponding values
# of the new ilm to the older ones:
ilm_z = ilms.BarnesStreakLegPoly2P1D(npts=(50, 30, 20, 13, 7, 7, 7), zorder=7)
ilm_z.set_values(ilm.params, ilm.values)
# Keep in mind that setting the parameters only works for this certain
# ilm classes. The BarnesStreakLegPoly2P1D (1) has the same named Barnes
# parameters regardless of the z-order, and (2) these determine the ilm field
# in the xy-plane in the same way when the number of points is the same and
# the image shape is the same. In contrast, if we had fit the in-plane
# illumination with a lower set of npts, just setting the parameters wouldn't
# work. [This is because the BarnesStreakLegPoly2P1D barnes parameters are
# labeled according to their distance from the leftmost edge of the image. So
# ilm-b0-49 would be on the rightmost side of the image if npts=(50,...), but
# it would be in the middle of the image if npts=(100,...).] We could get
# around this case by re-fitting the ilm when we start to fit the state below
# We need to add a background. In principle, we could be use the same bkg
# that worked for our blank image. However, in practice this doesn't work so
# well, leaving noticeable residuals in z (try it!). The reason for this is
# that the point-spread function has very long, power-law tails. While the psf
# describes the image of a point fairly well, when the psf is integrated over
# the entire area of the coverslip these tails become very long, too long to
# capture with a reasonably-sized numerical psf. To account for this, we do
# some minor analytical calculations and realize that the effect of the long-
# tails of the psf when convolved with a slab looks like a background that
# varies slowly in z. Thus, to account for some of the long-tails in the psf,
# we use a background which varies in z. Since this z-dependence won't couple
# with the dark-current xy dependence in our detector, we can split this out
# as bkg = f(x,y) + g(z), like so:
bkg = ilms.LegendrePoly2P1D(order=(7,3,5), category='bkg', operation='+')
# This detail is important not so much for its effect on the reconstruction
# of this blank image, but for what it illustrates -- while it is practically
# impossible to implement an exact generative model, careful thinking can allow
# for a model that is almost equivalent to the exact answer. To answer how
# much this approximation matters for measuring particle properties in an actual image,
# we could generate an image with a more exact representation of these psf
# long tails and then fit it with our more approximate model.
# Incidentally, while the support of our psf is finite, it's quite large --
# 35 pixels in z, or 44% of the image in z! If we wanted, we could increase
# this by changing the ``support_size`` keyword when calling
# exactpsf.FixedSSChebLinePSF.
# Finally, we create an offset:
off = comp.GlobalScalar('offset', 0.0)
st = states.ImageState(im_ilm, [ilm_z, off, psf, bkg, slab])
# As an illustration, we'll optimize certain parameters first for speed.
# Since we know that our xy-ilm parameters are the same, we'll start by
# optimizing the background and the ilm-z- params.
opt.do_levmarq(st, st.get('bkg').params + ['ilm-z-{}'.format(i) for i in
range(ilm_z.zorder)], max_iter=2)
# Looking at this with the OrthoManipulator it already looks good, but we do
# a full optimization to ensure that we're at the best fit.
opt.do_levmarq(st, st.params, exptol=1e-5, errtol=1e-3)
# (this will take some time; half an hour or so on my machine)
# Finally, plotting the average along different directions looks good:
plot_averaged_residuals(st)
# With the OrthoManipulator, we can also see that our fit looks good:
OrthoManipulator(st)
| mit |
jungtaekkim/GPflowBO | GPflowBO/bo.py | 1 | 7854 | import numpy as np
import tensorflow as tf
from scipy.optimize import minimize
try:
import GPflow
except:
import gpflow
GPflow = gpflow
import gp
import acquisition
import utils
NUM_GRID = 100
class BO():
def __init__(self, arr_range, str_kernel='se', is_ard=True, str_acq='ei', fun_mean=None):
self.arr_range = arr_range
self.str_kernel = str_kernel
self.str_acq = str_acq
self.is_ard = is_ard
        if fun_mean is None:
            self.fun_mean = GPflow.mean_functions.Zero()
            self.orig_fun_mean = None  # no region-wise mean was supplied
else:
list_mean = []
for elem in fun_mean:
if isinstance(elem[2], float):
list_mean.append(GPflow.mean_functions.Constant(elem[2]))
switched_mean = GPflow.mean_functions.SwitchedMeanFunction(list_mean)
self.fun_mean = switched_mean
self.orig_fun_mean = fun_mean
def _get_initial_random(self, int_seed=None):
if int_seed is not None:
np.random.seed(int_seed)
list_initial = []
for elem in self.arr_range:
list_initial.append(np.random.uniform(elem[0], elem[1]))
arr_initial = np.array(list_initial)
return arr_initial
def _get_initial_first(self):
arr_initial = self.arr_range[:, 0]
return arr_initial
def _get_initial(self, is_random=False, is_grid=False, fun_obj=None):
if is_random:
arr_initial = self._get_initial_random()
elif is_grid:
if fun_obj is None:
arr_initial = self._get_initial_first()
else:
list_grid = []
for elem in self.arr_range:
list_grid.append(np.linspace(elem[0], elem[1], NUM_GRID))
arr_grid = np.array(list_grid)
arr_initial = None
initial_best = np.inf
count_same = 0
for ind_initial in range(0, NUM_GRID**self.arr_range.shape[0]):
temp_ind = ind_initial
cur_initial = []
for ind_cur in range(0, self.arr_range.shape[0]):
cur_initial.append(arr_grid[ind_cur, temp_ind%NUM_GRID])
                        temp_ind //= NUM_GRID  # integer division: peel off one grid index per dimension
cur_initial = np.array(cur_initial)
cur_acq = fun_obj(cur_initial)
if cur_acq < initial_best:
initial_best = cur_acq
arr_initial = cur_initial
elif cur_acq == initial_best:
count_same += 1
if count_same == NUM_GRID - 1:
arr_initial = self._get_initial_random()
else:
arr_initial = self._get_initial_first()
return arr_initial
def _add_kernel_indicator(self, X_train):
list_indicator = []
for elem_1 in X_train:
flag_in = False
ind_in = 0
for ind_elem_2, elem_2 in enumerate(self.orig_fun_mean):
# print 'Compared: ', elem_1, elem_2
if (elem_2[0] <= elem_1).all() and (elem_1 <= elem_2[1]).all():
flag_in = True
ind_in = ind_elem_2
list_indicator.append(ind_in)
# print 'Indicator', X_train.shape, len(list_indicator)
return np.hstack((X_train, 1.0 * np.array(list_indicator).reshape(-1, 1)))
def _optimize_objective(self, fun_acq, fun_gp, model_gp, X_test, Y_train=None):
if self.orig_fun_mean is not None:
X_test = self._add_kernel_indicator(X_test)
pred_mean, pred_std = fun_gp(model_gp, X_test)
if Y_train is not None:
result_acq = fun_acq(pred_mean, pred_std, Y_train)
else:
result_acq = fun_acq(pred_mean, pred_std)
with tf.Session() as sess:
# with tf.Session(config=tf.ConfigProto(device_count={'GPU': 0})) as sess:
arr_acq = sess.run(result_acq)
if arr_acq.shape == (1, 1):
return arr_acq[0, 0]
else:
return arr_acq
def optimize(self, X_train, Y_train):
if self.str_kernel == 'se':
fun_ker = GPflow.kernels.RBF(X_train.shape[1], ARD=self.is_ard)
else:
fun_ker = GPflow.kernels.RBF(X_train.shape[1], ARD=self.is_ard)
if self.orig_fun_mean is not None:
X_train_ = self._add_kernel_indicator(X_train)
else:
X_train_ = X_train
model_gp = gp.build_model_gp(X_train_, Y_train, fun_ker, self.fun_mean)
if self.str_acq == 'pi':
fun_acq = acquisition.pi
elif self.str_acq == 'ei':
fun_acq = acquisition.ei
elif self.str_acq == 'ucb':
fun_acq = acquisition.ucb
else:
fun_acq = acquisition.ei
if self.str_acq == 'pi' or self.str_acq == 'ei':
fun_obj = lambda X_test: -100.0 * self._optimize_objective(fun_acq, gp.predict_test, model_gp, np.atleast_2d(X_test), Y_train=Y_train)
else:
fun_obj = lambda X_test: -100.0 * self._optimize_objective(fun_acq, gp.predict_test, model_gp, np.atleast_2d(X_test), Y_train=None)
list_bounds = []
for elem in self.arr_range:
list_bounds.append(tuple(elem))
result_optimized = minimize(fun_obj, x0=self._get_initial(False, True, fun_obj), bounds=list_bounds, options={'disp': True})
return result_optimized
def optimize_many(model_bo, fun_target, X_train, Y_train, num_iter):
X_final = X_train
Y_final = Y_train
for _ in range(0, num_iter):
result_bo = model_bo.optimize(X_final, Y_final)
X_final = np.vstack((X_final, result_bo.x))
Y_final = np.vstack((Y_final, fun_target(result_bo.x)))
return X_final, Y_final
def optimize_many_with_random_init(model_bo, fun_target, num_init, num_iter, int_seed=None):
list_init = []
for ind_init in range(0, num_init):
if int_seed is None or int_seed == 0:
print('REMIND: seed is None or 0.')
list_init.append(model_bo._get_initial_random())
else:
list_init.append(model_bo._get_initial_random(int_seed**2 * (ind_init+1)))
X_init = np.array(list_init)
Y_init = fun_target(X_init)
X_final, Y_final = optimize_many(model_bo, fun_target, X_init, Y_init, num_iter)
return X_final, Y_final
if __name__ == '__main__':
import matplotlib.pyplot as plt
# X_train = np.array([[-2.0], [0.0], [0.2], [0.1], [0.05], [0.15], [1.0], [1.5], [2.05], [1.9], [2.0], [2.1], [3.0], [-1.0]])
X_train = np.array([[-2.5], [2.5]])
Y_train = np.sin(X_train)
X_test = np.linspace(-3, 3, 100)
X_test = X_test.reshape((100, 1))
sess = tf.Session()
    model_bo = BO(np.array([[-3.0, 3.0]]), str_kernel='se', is_ard=True,
                  str_acq='ei',
                  fun_mean=np.array([[np.array([-3.0]), np.array([1.0]), 0.1],
                                     [np.array([1.0]), np.array([3.0]), -0.1]]))
print(model_bo.fun_mean)
model_gp = gp.build_model_gp(model_bo._add_kernel_indicator(X_train), Y_train, GPflow.kernels.RBF(X_train.shape[1], ARD=True), fun_mean=model_bo.fun_mean)
# model_gp = gp.build_model_gp(X_train, Y_train, GPflow.kernels.RBF(X_train.shape[1], ARD=True))
pred_mean, pred_std = gp.predict_test(model_gp, model_bo._add_kernel_indicator(X_test))
# pred_mean, pred_std = gp.predict_test(model_gp, X_test)
list_all_X_ = []
list_all_Y_ = []
for ind_all in range(0, 3):
X_, Y_ = optimize_many_with_random_init(model_bo, np.sin, 3, 5, 1 * (ind_all+1))
list_all_X_.append(X_)
list_all_Y_.append(Y_)
utils.plot_minimum_mean_std([list_all_Y_, list_all_Y_], ['abc', 'abcd'], './', 'abc', 3)
utils.plot_minimum_mean([list_all_Y_, list_all_Y_], ['abc', 'abcd'], './', 'abc', 3)
| mit |
yunfeilu/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
| bsd-3-clause |
funbaker/astropy | astropy/visualization/wcsaxes/tests/test_coordinate_helpers.py | 1 | 2034 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from unittest.mock import patch
import pytest
import matplotlib.pyplot as plt
from ..core import WCSAxes
from .... import units as u
from ....tests.image_tests import ignore_matplotlibrc
@ignore_matplotlibrc
def test_getaxislabel():
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')
ax.coords[0].set_axislabel("X")
ax.coords[1].set_axislabel("Y")
assert ax.coords[0].get_axislabel() == "X"
assert ax.coords[1].get_axislabel() == "Y"
@pytest.fixture
def ax():
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')
fig.add_axes(ax)
return ax
def assert_label_draw(ax, x_label, y_label):
ax.coords[0].set_axislabel("Label 1")
ax.coords[1].set_axislabel("Label 2")
with patch.object(ax.coords[0].axislabels, 'set_position') as pos1:
with patch.object(ax.coords[1].axislabels, 'set_position') as pos2:
ax.figure.canvas.draw()
assert pos1.call_count == x_label
assert pos2.call_count == y_label
@ignore_matplotlibrc
def test_label_visibility_rules_default(ax):
assert_label_draw(ax, True, True)
@ignore_matplotlibrc
def test_label_visibility_rules_label(ax):
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticks(values=[-9999]*u.deg)
assert_label_draw(ax, False, False)
@ignore_matplotlibrc
def test_label_visibility_rules_ticks(ax):
ax.coords[0].set_axislabel_visibility_rule('ticks')
ax.coords[1].set_axislabel_visibility_rule('ticks')
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticks(values=[-9999]*u.deg)
assert_label_draw(ax, True, False)
@ignore_matplotlibrc
def test_label_visibility_rules_always(ax):
ax.coords[0].set_axislabel_visibility_rule('always')
ax.coords[1].set_axislabel_visibility_rule('always')
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticks(values=[-9999]*u.deg)
assert_label_draw(ax, True, True)
| bsd-3-clause |
BhallaLab/moose-examples | snippets/function.py | 1 | 6612 | # function.py ---
#
# Filename: function.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Tue Sep 9 17:59:50 2014 (+0530)
# Version:
# Last-Updated: Sun Dec 20 00:02:50 2015 (-0500)
# By: subha
# Update #: 4
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import numpy as np
import sys
import matplotlib.pyplot as plt
import moose
simtime = 1.0
def example():
"""Function objects can be used to evaluate expressions with arbitrary
number of variables and constants. We can assign expression of the
form::
f(c0, c1, ..., cM, x0, x1, ..., xN, y0,..., yP )
where `c_i`'s are constants and `x_i`'s and `y_i`'s are variables.
The constants must be defined before setting the expression and
variables are connected via messages. The constants can have any
name, but the variable names must be of the form x{i} or y{i}
    where i is an increasing integer starting from 0.
The `x_i`'s are field elements and you have to set their number
first (function.x.num = N). Then you can connect any source field
sending out double to the 'input' destination field of the
`x[i]`.
The `y_i`'s are useful when the required variable is a value field
and is not available as a source field. In that case you connect
the `requestOut` source field of the function element to the
`get{Field}` destination field on the target element. The `y_i`'s
are automatically added on connecting. Thus, if you call::
moose.connect(function, 'requestOut', a, 'getSomeField')
moose.connect(function, 'requestOut', b, 'getSomeField')
then ``a.someField`` will be assigned to ``y0`` and
``b.someField`` will be assigned to ``y1``.
In this example we evaluate the expression: ``z = c0 * exp(c1 *
x0) * cos(y0)``
with x0 ranging from -1 to +1 and y0 ranging from -pi to
+pi. These values are stored in two stimulus tables called xtab
and ytab respectively, so that at each timestep the next values of
x0 and y0 are assigned to the function.
Along with the value of the expression itself we also compute its
derivative with respect to y0 and its derivative with respect to
time (rate). The former uses a five-point stencil for the
numerical differentiation and has a glitch at y=0. The latter uses
backward difference divided by dt.
Unlike the Func class, the number of variables and constants is
unlimited in Function and you can set all the variables via
messages.
"""
demo = moose.Neutral('/model')
function = moose.Function('/model/function')
function.c['c0'] = 1.0
function.c['c1'] = 2.0
#function.x.num = 1
function.expr = 'c0 * exp(c1*x0) * cos(y0) + sin(t)'
# mode 0 - evaluate function value, derivative and rate
# mode 1 - just evaluate function value,
# mode 2 - evaluate derivative,
# mode 3 - evaluate rate
function.mode = 0
function.independent = 'y0'
nsteps = 1000
xarr = np.linspace(0.0, 1.0, nsteps)
# Stimulus tables allow you to store sequences of numbers which
# are delivered via the 'output' message at each time step. This
# is a placeholder and in real scenario you will be using any
# sourceFinfo that sends out a double value.
input_x = moose.StimulusTable('/xtab')
input_x.vector = xarr
input_x.startTime = 0.0
input_x.stepPosition = xarr[0]
input_x.stopTime = simtime
moose.connect(input_x, 'output', function.x[0], 'input')
yarr = np.linspace(-np.pi, np.pi, nsteps)
input_y = moose.StimulusTable('/ytab')
input_y.vector = yarr
input_y.startTime = 0.0
input_y.stepPosition = yarr[0]
input_y.stopTime = simtime
moose.connect(function, 'requestOut', input_y, 'getOutputValue')
# data recording
result = moose.Table('/ztab')
moose.connect(result, 'requestOut', function, 'getValue')
derivative = moose.Table('/zprime')
moose.connect(derivative, 'requestOut', function, 'getDerivative')
rate = moose.Table('/dz_by_dt')
moose.connect(rate, 'requestOut', function, 'getRate')
x_rec = moose.Table('/xrec')
moose.connect(x_rec, 'requestOut', input_x, 'getOutputValue')
y_rec = moose.Table('/yrec')
moose.connect(y_rec, 'requestOut', input_y, 'getOutputValue')
dt = simtime/nsteps
for ii in range(32):
moose.setClock(ii, dt)
moose.reinit()
moose.start(simtime)
# The following lines plot the MOOSE output against values computed with
# numpy, using matplotlib (imported at the top of this file).
plt.subplot(3,1,1)
plt.plot(x_rec.vector, result.vector, 'r-', label='z = {}'.format(function.expr))
z = function.c['c0'] * np.exp(function.c['c1'] * xarr) * np.cos(yarr) + np.sin(np.arange(len(xarr)) * dt)
plt.plot(xarr, z, 'b--', label='numpy computed')
plt.xlabel('x')
plt.ylabel('z')
plt.legend()
plt.subplot(3,1,2)
plt.plot(y_rec.vector, derivative.vector, 'r-', label='dz/dy0')
# derivatives computed by putting x values in the analytical formula
dzdy = function.c['c0'] * np.exp(function.c['c1'] * xarr) * (- np.sin(yarr))
plt.plot(yarr, dzdy, 'b--', label='numpy computed')
plt.xlabel('y')
plt.ylabel('dz/dy')
plt.legend()
plt.subplot(3,1,3)
# *** BEWARE *** The first two entries are spurious. Entry 0 is
# *** from reinit sending out the defaults. Entry 1 is because
# *** there is no lastValue for computing a real forward difference.
plt.plot(np.arange(2, len(rate.vector), 1) * dt, rate.vector[2:], 'r-', label='dz/dt')
dzdt = np.diff(z)/dt
plt.plot(np.arange(0, len(dzdt), 1.0) * dt, dzdt, 'b--', label='numpy computed')
plt.xlabel('t')
plt.ylabel('dz/dt')
plt.legend()
plt.tight_layout()
plt.show()
if __name__ == '__main__':
example()
#
# function.py ends here
| gpl-2.0 |
pulinagrawal/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/rcsetup.py | 69 | 23344 | """
The rcsetup module contains the default values and the validation code for
customization using matplotlib's rc settings.
Each rc setting is assigned a default value and a function used to validate any
attempted changes to that setting. The default values and validation functions
are defined in the rcsetup module, and are used to construct the rcParams global
object which stores the settings and is referenced throughout matplotlib.
These default values should be consistent with the default matplotlibrc file
that actually reflects the values given here. Any additions or deletions to the
parameter set listed here should also be reflected in the
:file:`matplotlibrc.template` in matplotlib's root source directory.
"""
import os
import warnings
from matplotlib.fontconfig_pattern import parse_fontconfig_pattern
from matplotlib.colors import is_color_like
#interactive_bk = ['gtk', 'gtkagg', 'gtkcairo', 'fltkagg', 'qtagg', 'qt4agg',
# 'tkagg', 'wx', 'wxagg', 'cocoaagg']
# The capitalized forms are needed for ipython at present; this may
# change for later versions.
interactive_bk = ['GTK', 'GTKAgg', 'GTKCairo', 'FltkAgg', 'MacOSX',
'QtAgg', 'Qt4Agg', 'TkAgg', 'WX', 'WXAgg', 'CocoaAgg']
non_interactive_bk = ['agg', 'cairo', 'emf', 'gdk',
'pdf', 'ps', 'svg', 'template']
all_backends = interactive_bk + non_interactive_bk
class ValidateInStrings:
def __init__(self, key, valid, ignorecase=False):
'valid is a list of legal strings'
self.key = key
self.ignorecase = ignorecase
def func(s):
if ignorecase: return s.lower()
else: return s
self.valid = dict([(func(k),k) for k in valid])
def __call__(self, s):
if self.ignorecase: s = s.lower()
if s in self.valid: return self.valid[s]
raise ValueError('Unrecognized %s string "%s": valid strings are %s'
% (self.key, s, self.valid.values()))
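# Example usage (illustrative only; mirrors the validators defined below):
#
#   _v = ValidateInStrings('toolbar', ['None', 'classic', 'toolbar2'], ignorecase=True)
#   _v('Classic')   # -> 'classic'
#   _v('bogus')     # raises ValueError listing the valid strings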
def validate_path_exists(s):
'If s is a path, return s, else False'
if os.path.exists(s): return s
else:
raise RuntimeError('"%s" should be a path but it does not exist'%s)
def validate_bool(b):
'Convert b to a boolean or raise'
if type(b) is str:
b = b.lower()
if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True): return True
elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False): return False
else:
raise ValueError('Could not convert "%s" to boolean' % b)
def validate_bool_maybe_none(b):
'Convert b to a boolean or raise'
if type(b) is str:
b = b.lower()
if b=='none': return None
if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True): return True
elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False): return False
else:
raise ValueError('Could not convert "%s" to boolean' % b)
def validate_float(s):
'convert s to float or raise'
try: return float(s)
except ValueError:
raise ValueError('Could not convert "%s" to float' % s)
def validate_int(s):
'convert s to int or raise'
try: return int(s)
except ValueError:
raise ValueError('Could not convert "%s" to int' % s)
def validate_fonttype(s):
'confirm that this is a PostScript or PDF font type that we know how to convert to'
fonttypes = { 'type3': 3,
'truetype': 42 }
try:
fonttype = validate_int(s)
except ValueError:
if s.lower() in fonttypes.keys():
return fonttypes[s.lower()]
raise ValueError('Supported Postscript/PDF font types are %s' % fonttypes.keys())
else:
if fonttype not in fonttypes.values():
raise ValueError('Supported Postscript/PDF font types are %s' % fonttypes.values())
return fonttype
#validate_backend = ValidateInStrings('backend', all_backends, ignorecase=True)
_validate_standard_backends = ValidateInStrings('backend', all_backends, ignorecase=True)
def validate_backend(s):
if s.startswith('module://'): return s
else: return _validate_standard_backends(s)
validate_numerix = ValidateInStrings('numerix',[
'Numeric','numarray','numpy',
], ignorecase=True)
validate_toolbar = ValidateInStrings('toolbar',[
'None','classic','toolbar2',
], ignorecase=True)
def validate_autolayout(v):
if v:
warnings.warn("figure.autolayout is not currently supported")
class validate_nseq_float:
def __init__(self, n):
self.n = n
def __call__(self, s):
'return a seq of n floats or raise'
if type(s) is str:
ss = s.split(',')
if len(ss) != self.n:
raise ValueError('You must supply exactly %d comma separated values'%self.n)
try:
return [float(val) for val in ss]
except ValueError:
raise ValueError('Could not convert all entries to floats')
else:
assert type(s) in (list,tuple)
if len(s) != self.n:
raise ValueError('You must supply exactly %d values'%self.n)
return [float(val) for val in s]
class validate_nseq_int:
def __init__(self, n):
self.n = n
def __call__(self, s):
'return a seq of n ints or raise'
if type(s) is str:
ss = s.split(',')
if len(ss) != self.n:
raise ValueError('You must supply exactly %d comma separated values'%self.n)
try:
return [int(val) for val in ss]
except ValueError:
raise ValueError('Could not convert all entries to ints')
else:
assert type(s) in (list,tuple)
if len(s) != self.n:
raise ValueError('You must supply exactly %d values'%self.n)
return [int(val) for val in s]
def validate_color(s):
'return a valid color arg'
if s.lower() == 'none':
return 'None'
if is_color_like(s):
return s
stmp = '#' + s
if is_color_like(stmp):
return stmp
# If it is still valid, it must be a tuple.
colorarg = s
msg = ''
if s.find(',')>=0:
# get rid of grouping symbols
stmp = ''.join([ c for c in s if c.isdigit() or c=='.' or c==','])
vals = stmp.split(',')
if len(vals)!=3:
msg = '\nColor tuples must be length 3'
else:
try:
colorarg = [float(val) for val in vals]
except ValueError:
msg = '\nCould not convert all entries to floats'
if not msg and is_color_like(colorarg):
return colorarg
raise ValueError('%s does not look like a color arg%s'%(s, msg))
def validate_stringlist(s):
'return a list'
if type(s) is str:
return [ v.strip() for v in s.split(',') ]
else:
assert type(s) in [list,tuple]
return [ str(v) for v in s ]
validate_orientation = ValidateInStrings('orientation',[
'landscape', 'portrait',
])
def validate_aspect(s):
if s in ('auto', 'equal'):
return s
try:
return float(s)
except ValueError:
raise ValueError('not a valid aspect specification')
def validate_fontsize(s):
if type(s) is str:
s = s.lower()
if s in ['xx-small', 'x-small', 'small', 'medium', 'large', 'x-large',
'xx-large', 'smaller', 'larger']:
return s
try:
return float(s)
except ValueError:
raise ValueError('not a valid font size')
def validate_font_properties(s):
parse_fontconfig_pattern(s)
return s
validate_fontset = ValidateInStrings('fontset', ['cm', 'stix', 'stixsans', 'custom'])
validate_verbose = ValidateInStrings('verbose',[
'silent', 'helpful', 'debug', 'debug-annoying',
])
validate_cairo_format = ValidateInStrings('cairo_format',
['png', 'ps', 'pdf', 'svg'],
ignorecase=True)
validate_ps_papersize = ValidateInStrings('ps_papersize',[
'auto', 'letter', 'legal', 'ledger',
'a0', 'a1', 'a2','a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10',
'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9', 'b10',
], ignorecase=True)
def validate_ps_distiller(s):
if type(s) is str:
s = s.lower()
if s in ('none',None):
return None
elif s in ('false', False):
return False
elif s in ('ghostscript', 'xpdf'):
return s
else:
raise ValueError('matplotlibrc ps.usedistiller must either be none, ghostscript or xpdf')
validate_joinstyle = ValidateInStrings('joinstyle',['miter', 'round', 'bevel'], ignorecase=True)
validate_capstyle = ValidateInStrings('capstyle',['butt', 'round', 'projecting'], ignorecase=True)
validate_negative_linestyle = ValidateInStrings('negative_linestyle',['solid', 'dashed'], ignorecase=True)
def validate_negative_linestyle_legacy(s):
try:
res = validate_negative_linestyle(s)
return res
except ValueError:
dashes = validate_nseq_float(2)(s)
warnings.warn("Deprecated negative_linestyle specification; use 'solid' or 'dashed'")
return (0, dashes) # (offset, (solid, blank))
validate_legend_loc = ValidateInStrings('legend_loc',[
'best',
'upper right',
'upper left',
'lower left',
'lower right',
'right',
'center left',
'center right',
'lower center',
'upper center',
'center',
], ignorecase=True)
class ValidateInterval:
"""
Value must be in interval
"""
def __init__(self, vmin, vmax, closedmin=True, closedmax=True):
self.vmin = vmin
self.vmax = vmax
self.cmin = closedmin
self.cmax = closedmax
def __call__(self, s):
try: s = float(s)
except: raise RuntimeError('Value must be a float; found "%s"'%s)
if self.cmin and s<self.vmin:
raise RuntimeError('Value must be >= %f; found "%f"'%(self.vmin, s))
elif not self.cmin and s<=self.vmin:
raise RuntimeError('Value must be > %f; found "%f"'%(self.vmin, s))
if self.cmax and s>self.vmax:
raise RuntimeError('Value must be <= %f; found "%f"'%(self.vmax, s))
elif not self.cmax and s>=self.vmax:
raise RuntimeError('Value must be < %f; found "%f"'%(self.vmax, s))
return s
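# Example usage (illustrative only; the figure.subplot.* entries below rely on this):
#
#   ValidateInterval(0, 1, closedmin=True, closedmax=True)(0.125)    # -> 0.125
#   ValidateInterval(0, 1, closedmin=True, closedmax=False)(1.0)     # raises RuntimeError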
# a map from key -> value, converter
defaultParams = {
'backend' : ['Agg', validate_backend], # agg is certainly present
'backend_fallback' : [True, validate_bool], # agg is certainly present
'numerix' : ['numpy', validate_numerix],
'maskedarray' : [False, validate_bool],
'toolbar' : ['toolbar2', validate_toolbar],
'datapath' : [None, validate_path_exists], # handled by _get_data_path_cached
'units' : [False, validate_bool],
'interactive' : [False, validate_bool],
'timezone' : ['UTC', str],
# the verbosity setting
'verbose.level' : ['silent', validate_verbose],
'verbose.fileo' : ['sys.stdout', str],
# line props
'lines.linewidth' : [1.0, validate_float], # line width in points
'lines.linestyle' : ['-', str], # solid line
'lines.color' : ['b', validate_color], # blue
'lines.marker' : ['None', str], # no marker by default
'lines.markeredgewidth' : [0.5, validate_float],
'lines.markersize' : [6, validate_float], # markersize, in points
'lines.antialiased' : [True, validate_bool], # antialiased (no jaggies)
'lines.dash_joinstyle' : ['miter', validate_joinstyle],
'lines.solid_joinstyle' : ['miter', validate_joinstyle],
'lines.dash_capstyle' : ['butt', validate_capstyle],
'lines.solid_capstyle' : ['projecting', validate_capstyle],
# patch props
'patch.linewidth' : [1.0, validate_float], # line width in points
'patch.edgecolor' : ['k', validate_color], # black
'patch.facecolor' : ['b', validate_color], # blue
'patch.antialiased' : [True, validate_bool], # antialiased (no jaggies)
# font props
'font.family' : ['sans-serif', str], # used by text object
'font.style' : ['normal', str], #
'font.variant' : ['normal', str], #
'font.stretch' : ['normal', str], #
'font.weight' : ['normal', str], #
'font.size' : [12.0, validate_float], #
'font.serif' : [['Bitstream Vera Serif', 'DejaVu Serif',
'New Century Schoolbook', 'Century Schoolbook L',
'Utopia', 'ITC Bookman', 'Bookman',
'Nimbus Roman No9 L','Times New Roman',
'Times','Palatino','Charter','serif'],
validate_stringlist],
'font.sans-serif' : [['Bitstream Vera Sans', 'DejaVu Sans',
'Lucida Grande', 'Verdana', 'Geneva', 'Lucid',
'Arial', 'Helvetica', 'Avant Garde', 'sans-serif'],
validate_stringlist],
'font.cursive' : [['Apple Chancery','Textile','Zapf Chancery',
'Sand','cursive'], validate_stringlist],
'font.fantasy' : [['Comic Sans MS','Chicago','Charcoal','Impact',
'Western','fantasy'], validate_stringlist],
'font.monospace' : [['Bitstream Vera Sans Mono', 'DejaVu Sans Mono',
'Andale Mono', 'Nimbus Mono L', 'Courier New',
'Courier','Fixed', 'Terminal','monospace'],
validate_stringlist],
# text props
'text.color' : ['k', validate_color], # black
'text.usetex' : [False, validate_bool],
'text.latex.unicode' : [False, validate_bool],
'text.latex.preamble' : [[''], validate_stringlist],
'text.dvipnghack' : [None, validate_bool_maybe_none],
'text.fontstyle' : ['normal', str],
'text.fontangle' : ['normal', str],
'text.fontvariant' : ['normal', str],
'text.fontweight' : ['normal', str],
'text.fontsize' : ['medium', validate_fontsize],
'mathtext.cal' : ['cursive', validate_font_properties],
'mathtext.rm' : ['serif', validate_font_properties],
'mathtext.tt' : ['monospace', validate_font_properties],
'mathtext.it' : ['serif:italic', validate_font_properties],
'mathtext.bf' : ['serif:bold', validate_font_properties],
'mathtext.sf' : ['sans\-serif', validate_font_properties],
'mathtext.fontset' : ['cm', validate_fontset],
'mathtext.fallback_to_cm' : [True, validate_bool],
'image.aspect' : ['equal', validate_aspect], # equal, auto, a number
'image.interpolation' : ['bilinear', str],
'image.cmap' : ['jet', str], # one of gray, jet, etc
'image.lut' : [256, validate_int], # lookup table
'image.origin' : ['upper', str], # 'upper' or 'lower': where the [0, 0] index is drawn
'image.resample' : [False, validate_bool],
'contour.negative_linestyle' : ['dashed', validate_negative_linestyle_legacy],
# axes props
'axes.axisbelow' : [False, validate_bool],
'axes.hold' : [True, validate_bool],
'axes.facecolor' : ['w', validate_color], # background color; white
'axes.edgecolor' : ['k', validate_color], # edge color; black
'axes.linewidth' : [1.0, validate_float], # edge linewidth
'axes.titlesize' : ['large', validate_fontsize], # fontsize of the axes title
'axes.grid' : [False, validate_bool], # display grid or not
'axes.labelsize' : ['medium', validate_fontsize], # fontsize of the x and y labels
'axes.labelcolor' : ['k', validate_color], # color of axis label
'axes.formatter.limits' : [[-7, 7], validate_nseq_int(2)],
# use scientific notation if log10
# of the axis range is smaller than the
# first or larger than the second
'axes.unicode_minus' : [True, validate_bool],
'polaraxes.grid' : [True, validate_bool], # display polar grid or not
#legend properties
'legend.fancybox' : [False,validate_bool],
'legend.loc' : ['upper right',validate_legend_loc], # at some point, this should be changed to 'best'
'legend.isaxes' : [True,validate_bool], # this option is internally ignored - it never served any useful purpose
'legend.numpoints' : [2, validate_int], # the number of points in the legend line
'legend.fontsize' : ['large', validate_fontsize],
'legend.pad' : [0, validate_float], # was 0.2, deprecated; the fractional whitespace inside the legend border
'legend.borderpad' : [0.4, validate_float], # units are fontsize
'legend.markerscale' : [1.0, validate_float], # the relative size of legend markers vs. original
# the following dimensions are in axes coords
'legend.labelsep' : [0.010, validate_float], # the vertical space between the legend entries
'legend.handlelen' : [0.05, validate_float], # the length of the legend lines
'legend.handletextsep' : [0.02, validate_float], # the space between the legend line and legend text
'legend.axespad' : [0.02, validate_float], # the border between the axes and legend edge
'legend.shadow' : [False, validate_bool],
'legend.labelspacing' : [0.5, validate_float], # the vertical space between the legend entries
'legend.handlelength' : [2., validate_float], # the length of the legend lines
'legend.handletextpad' : [.8, validate_float], # the space between the legend line and legend text
'legend.borderaxespad' : [0.5, validate_float], # the border between the axes and legend edge
'legend.columnspacing' : [2., validate_float], # the spacing between legend columns
'legend.markerscale' : [1.0, validate_float], # the relative size of legend markers vs. original
# the following dimensions are in axes coords
'legend.labelsep' : [0.010, validate_float], # the vertical space between the legend entries
'legend.handlelen' : [0.05, validate_float], # the length of the legend lines
'legend.handletextsep' : [0.02, validate_float], # the space between the legend line and legend text
'legend.axespad' : [0.5, validate_float], # the border between the axes and legend edge
'legend.shadow' : [False, validate_bool],
# tick properties
'xtick.major.size' : [4, validate_float], # major xtick size in points
'xtick.minor.size' : [2, validate_float], # minor xtick size in points
'xtick.major.pad' : [4, validate_float], # distance to label in points
'xtick.minor.pad' : [4, validate_float], # distance to label in points
'xtick.color' : ['k', validate_color], # color of the xtick labels
'xtick.labelsize' : ['medium', validate_fontsize], # fontsize of the xtick labels
'xtick.direction' : ['in', str], # direction of xticks
'ytick.major.size' : [4, validate_float], # major ytick size in points
'ytick.minor.size' : [2, validate_float], # minor ytick size in points
'ytick.major.pad' : [4, validate_float], # distance to label in points
'ytick.minor.pad' : [4, validate_float], # distance to label in points
'ytick.color' : ['k', validate_color], # color of the ytick labels
'ytick.labelsize' : ['medium', validate_fontsize], # fontsize of the ytick labels
'ytick.direction' : ['in', str], # direction of yticks
'grid.color' : ['k', validate_color], # grid color
'grid.linestyle' : [':', str], # dotted
'grid.linewidth' : [0.5, validate_float], # in points
# figure props
# figure size in inches: width by height
'figure.figsize' : [ [8.0,6.0], validate_nseq_float(2)],
'figure.dpi' : [ 80, validate_float], # DPI
'figure.facecolor' : [ '0.75', validate_color], # facecolor; scalar gray
'figure.edgecolor' : [ 'w', validate_color], # edgecolor; white
'figure.autolayout' : [ False, validate_autolayout],
'figure.subplot.left' : [0.125, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.right' : [0.9, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.bottom' : [0.1, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.top' : [0.9, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.wspace' : [0.2, ValidateInterval(0, 1, closedmin=True, closedmax=False)],
'figure.subplot.hspace' : [0.2, ValidateInterval(0, 1, closedmin=True, closedmax=False)],
'savefig.dpi' : [100, validate_float], # DPI
'savefig.facecolor' : ['w', validate_color], # facecolor; white
'savefig.edgecolor' : ['w', validate_color], # edgecolor; white
'savefig.orientation' : ['portrait', validate_orientation], # orientation of saved figures
'cairo.format' : ['png', validate_cairo_format],
'tk.window_focus' : [False, validate_bool], # Maintain shell focus for TkAgg
'tk.pythoninspect' : [False, validate_bool], # Set PYTHONINSPECT
'ps.papersize' : ['letter', validate_ps_papersize], # Set the papersize/type
'ps.useafm' : [False, validate_bool], # use AFM fonts for PostScript output (smaller files)
'ps.usedistiller' : [False, validate_ps_distiller], # use ghostscript or xpdf to distill ps output
'ps.distiller.res' : [6000, validate_int], # dpi
'ps.fonttype' : [3, validate_fonttype], # 3 (Type3) or 42 (Truetype)
'pdf.compression' : [6, validate_int], # compression level from 0 to 9; 0 to disable
'pdf.inheritcolor' : [False, validate_bool], # ignore any color-setting commands from the frontend
'pdf.use14corefonts' : [False, validate_bool], # use only the 14 PDF core fonts
# embedded in every PDF viewing application
'pdf.fonttype' : [3, validate_fonttype], # 3 (Type3) or 42 (Truetype)
'svg.image_inline' : [True, validate_bool], # write raster image data directly into the svg file
'svg.image_noscale' : [False, validate_bool], # suppress scaling of raster data embedded in SVG
'svg.embed_char_paths' : [True, validate_bool], # True to save all characters as paths in the SVG
'docstring.hardcopy' : [False, validate_bool], # set this when you want to generate hardcopy docstring
'plugins.directory' : ['.matplotlib_plugins', str], # where the plugin directory is located
'path.simplify' : [False, validate_bool],
'agg.path.chunksize' : [0, validate_int] # 0 to disable chunking;
# recommend about 20000 to
# enable. Experimental.
}
if __name__ == '__main__':
rc = defaultParams
rc['datapath'][0] = '/'
for key in rc:
if not rc[key][1](rc[key][0]) == rc[key][0]:
print "%s: %s != %s"%(key, rc[key][1](rc[key][0]), rc[key][0])
| agpl-3.0 |
ADEQUATeDQ/portalmonitor | odpw/web_rest/ui/odpw_ui_blueprint.py | 1 | 40986 | import ast
import datetime
import json
import time
from collections import defaultdict
from urlparse import urlparse
import StringIO
import csv
import jinja2
import pandas as pd
from bokeh.embed import components
from bokeh.resources import INLINE
from flask import Blueprint, current_app, render_template, jsonify, make_response, url_for
from markupsafe import Markup
from sqlalchemy import func, and_
from odpw.core.mat_views import withView, createView
from odpw.core.api import validURLDist, statusCodeDist, portalSnapshotQualityDF, getMetaResource, getResourceInfos
from odpw.utils.error_handling import errorStatus
from odpw.utils.plots import fetchProcessChart, qualityChart, qa, portalsScatter, evolutionCharts, \
systemEvolutionPlot, portalDynamicity
from odpw.core.db import row2dict
from odpw.core.model import Portal, PortalSnapshotQuality, PortalSnapshot, Dataset, DatasetData, DatasetQuality, \
MetaResource, ResourceInfo, PortalSnapshotDynamicity
from odpw.services.aggregates import aggregatePortalInfo
from odpw.utils.timing import Timer
from odpw.utils.utils_snapshot import getWeekString, getSnapshotfromTime, getPreviousWeek, getNextWeek, \
getLastNSnapshots, getCurrentSnapshot
from odpw.web_rest.cache import cache
from odpw.reporter import dataset_reporter
from odpw.reporter.contact_reporter import orgaReport, contactPerOrga
from schemadotorg import dcat_to_schemadotorg
ui = Blueprint('ui', __name__,
template_folder='../templates',
static_folder='../static',
)
# Jinja2 template filters used by the templates
@jinja2.contextfilter
def get_domain(context, url):
return "%s" % urlparse(url).netloc
ui.add_app_template_filter(get_domain)
ui.add_app_template_filter(getWeekString)
##-----------Helper Functions -----------##
def render(templateName, data=None,**kwargs):
"""
Flask Jinja rendering function
:param templateName: jinja template name
:param data: json data for the template
:return: html
"""
portalCount=getPortalCount()
#print portalCount
#print kwargs
if data is None:
data={}
#print data
data['portalCount']=portalCount
with Timer(key="renderHTML", verbose=True) as t:
return render_template(templateName, data=data,**kwargs)
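# Typical call (illustrative, mirroring the view functions below):
#   render('odpw_portals.jinja', data={'portals': ps})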
@cache.cached(timeout=300, key_prefix='getPortalCount')
def getPortalCount():
with Timer(key="getPortalCount", verbose=True):
return current_app.config['dbsession'].query(Portal.id).filter(Portal.active==True).count()
@ui.route('/', methods=['GET'])
def index():
return render('index.jinja')
@ui.route('/about', methods=['GET'])
def about():
return render('about.jinja')
@ui.route('/quality', methods=['GET'])
def qualitymetrics():
return render('quality_metrics.jinja',qa=qa)
@ui.route('/sparql', methods=['GET'])
def sparqlendpoint():
return render('sparql_endpoint.jinja')
@ui.route('/spec', methods=['GET'])
def spec():
return render('spec.json', data={'host':"localhost:5123/", 'basePath':"api"})
@ui.route('/api', methods=['GET'])
def apispec():
return render('apiui.jinja')
@ui.route('/timer', methods=['GET'])
def timer():
print Timer.getStats()
return render('timer.jinja', data={'stats':Timer.getStats()})
@ui.route('/system', methods=['GET'])
def system():
with Timer(key="system" , verbose=True):
return render("odpw_system_info.jinja")
@ui.route('/licensesearch', methods=['GET'])
@ui.route('/licensesearch/<path:uri>', methods=['GET'])
@cache.cached(timeout=60*60*24)
def licensesearch(uri=None):
with Timer(key="get_licensesearch" , verbose=True):
data={}
if uri != None:
cursn = getPreviousWeek(getSnapshotfromTime(datetime.datetime.now()))
Session = current_app.config['dbsession']
with Timer(key="query_licensesearch"):
q = Session.query(Dataset, DatasetData) \
.join(MetaResource, Dataset.md5 == MetaResource.md5) \
.join(DatasetData, Dataset.md5 == DatasetData.md5) \
.filter(Dataset.snapshot == cursn) \
.filter(MetaResource.uri == uri)
results=[]
for r in q:
results.append(row2dict(r))
data['uri'] = uri
data['snapshot'] = cursn
data['results']=results
return render("odpw_license_search.jinja", data=data)
@ui.route('/system/changes', methods=['GET'])
@cache.cached(timeout=60*60*24)
def systemchanges():
with Timer(key="get_systemchanges"):
Session=current_app.config['dbsession']
cursn=getSnapshotfromTime(datetime.datetime.now())
prevWeek=getPreviousWeek(cursn)
with Timer(key="query_systemchanges"):
data_cur={r.portalid:r for r in Session.query(PortalSnapshot).filter(PortalSnapshot.snapshot==cursn)}
data_prev={r.portalid:r for r in Session.query(PortalSnapshot).filter(PortalSnapshot.snapshot==prevWeek)}
data={'status_change':{},'ds_change':{},'res_change':{}}
for pid,ps in data_cur.items():
if pid in data_prev:
if ps.status == data_prev[pid].status:
if ps.datasetcount != data_prev[pid].datasetcount:
dsfrom=data_prev[pid].datasetcount if data_prev[pid].datasetcount is not None else 0
dsto=ps.datasetcount if ps.datasetcount is not None else 0
data['ds_change'][pid]={'from':dsfrom , 'to':dsto}
elif ps.resourcecount != data_prev[pid].resourcecount:
resfrom=data_prev[pid].resourcecount if data_prev[pid].resourcecount is not None else 0
resto= ps.resourcecount if ps.resourcecount is not None else 0
data['res_change'][pid]={'from':resfrom, 'to':resto}
else:
data['status_change'][pid]={'from':data_prev[pid].status, 'to':ps.status}
data['from']=prevWeek
data['to']=cursn
return render("odpw_system_changes.jinja", data=data)
@ui.route('/system/fetch', methods=['GET'])
@cache.cached(timeout=60*60*24)
def systemfetch():
with Timer(key="get_systemfetch"):
Session=current_app.config['dbsession']
cursn=getSnapshotfromTime(datetime.datetime.now())
snapshots=getLastNSnapshots(cursn,n=5)
nWeeksago=snapshots[-1]
cnts=defaultdict(int)
data={}
with Timer(key="query_systemfetch"):
for r in Session.query(PortalSnapshot.snapshot, PortalSnapshot.start, PortalSnapshot.end-PortalSnapshot.start).filter(PortalSnapshot.snapshot>nWeeksago):
sn,start, dur = r[0], r[1],r[2]
cnts[sn]+=1
d=data.setdefault(sn,{})
if dur is not None:
ds=d.setdefault(start,[])
ds.append(dur.total_seconds())
for sn, d in data.items():
dd=[]
gstart= min(d.keys())
for start, durations in d.items():
for dur in durations:
delta=( start-gstart).total_seconds() + dur
dd.append(delta)
data[sn]=dd
with Timer(key="plot_systemfetch"):
p= fetchProcessChart(data, cnts)
script, div= components(p)
js_resources = INLINE.render_js()
css_resources = INLINE.render_css()
return render("odpw_system_fetch.jinja",
plot_script=script,
plot_div=div,
js_resources=js_resources,
css_resources=css_resources
)
@ui.route('/system/evolution', methods=['GET'])
@cache.cached(timeout=60*60*24)
def systemevolv():
with Timer(key="get_systemevolv", verbose=True):
Session=current_app.config['dbsession']
with Timer(key="query_systemevolv", verbose=True):
t= Session.query(PortalSnapshot.snapshot.label('snapshot'), Portal.software,PortalSnapshot.datasetcount,PortalSnapshot.resourcecount).join(Portal).subquery()
q= Session.query(t.c.snapshot, t.c.software, func.count().label('count'),func.sum(t.c.resourcecount).label('resources'),func.sum(t.c.datasetcount).label('datasets')).group_by(t.c.snapshot,t.c.software)
data=[ row2dict(r) for r in q.all()]
df= pd.DataFrame(data)
with Timer(key="plot_systemevolv", verbose=True):
p=systemEvolutionPlot(df)
script, div= components(p)
js_resources = INLINE.render_js()
css_resources = INLINE.render_css()
return render("odpw_system_evolution.jinja",
plot_script=script,
plot_div=div,
js_resources=js_resources,
css_resources=css_resources
)
#--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--#
### PORTAL
#--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--#
@cache.cached(timeout=60*60*24*7, key_prefix='getPortalsInfo')
def getPortalsInfo():
with Timer(key="getPortalsInfo", verbose=True):
ps=[]
r=current_app.config['dbsession'].query(Portal, Portal.snapshot_count,Portal.first_snapshot, Portal.last_snapshot, Portal.datasetcount, Portal.resourcecount).filter(Portal.active==True)
for P in r:
#print 'P',P
d={}
d.update(row2dict(P[0]))
d['snCount']=P[1]
d['snFirst']=P[2]
d['snLast']=P[3]
d['datasets']=P[4]
d['resources']=P[5]
ps.append(d)
return ps
@ui.route('/portalslist', methods=['GET'])
@cache.cached(timeout=60*60*24)
def portalslist():
with Timer(key="get_portalslist", verbose=True):
ps=getPortalsInfo()
return render('odpw_portals.jinja', data={'portals':ps})
@ui.route('/portalstable', methods=['GET'])
@cache.cached(timeout=60*60*24)
def portalstable():
with Timer(key="get_portalstable", verbose=True):
ps=getPortalsInfo()
return render('odpw_portals_table.jinja', data={'portals':ps})
@ui.route('/portals/portalsdynamic', methods=['GET'])
@cache.cached(timeout=60*60*24)
def portalsdynamicy():
with Timer(key="get_portalsdynamicy", verbose=True):
snapshot = getSnapshotfromTime(datetime.datetime.now())
Session = current_app.config['dbsession']
with Timer(key="query_portalsdynamicy", verbose=True):
res = [r for r in
Session.query(Portal).join(PortalSnapshotDynamicity).filter(
PortalSnapshotDynamicity.snapshot == snapshot).add_entity(PortalSnapshotDynamicity)]
results=[]
keys = [
'dindex',
'changefrequ',
'adddelratio',
'dyratio',
'staticRatio',
'addRatio',
'delRatio',
'updatedRatio'
]
for r in res:
d=row2dict(r)
for k in keys:
d[k]=r[1].__getattribute__(k)
results.append(d)
df = pd.DataFrame(results)
for c in keys:
df[c] = df[c].convert_objects(convert_numeric=True)
return render('odpw_portals_dynamics.jinja', data={'portals':results}, keys=keys, snapshot=snapshot)
@ui.route('/portals/portalsquality', methods=['GET'])
@cache.cached(timeout=60*60*24)
def portalsquality():
with Timer(key="get_portalsquality", verbose=True):
Session=current_app.config['dbsession']
snapshot=getSnapshotfromTime(datetime.datetime.now())
with Timer(key="query_portalsquality"):
results=[row2dict(r) for r in Session.query(Portal, Portal.datasetcount, Portal.resourcecount).join(PortalSnapshotQuality).filter(PortalSnapshotQuality.snapshot==snapshot).add_entity(PortalSnapshotQuality)]
keys=[ i.lower() for q in qa for i in q['metrics'] ]
df=pd.DataFrame(results)
#print df
for c in keys:
#print c,df[c]
#print '___'*10
df[c]=df[c].convert_objects(convert_numeric=True)
dfiso= df.groupby(['iso'])
dfiso=dfiso.agg('mean')\
.join(pd.DataFrame(dfiso.size(),columns=['count']))
resultsIso= dfiso.reset_index().to_dict(orient='records')
dfsoft= df.groupby(['software'])
dfsoft=dfsoft.agg('mean')\
.join(pd.DataFrame(dfsoft.size(),columns=['count']))
resultSoft= dfsoft.reset_index().to_dict(orient='records')
return render('odpw_portals_quality.jinja', data={'portals':results,'iso':resultsIso,'soft':resultSoft}, keys=keys, snapshot=snapshot)
@ui.route('/portals/portalsstats', methods=['GET'])
@cache.cached(timeout=60*60*24)
def portalssize():
with Timer(key="get_portalsstats", verbose=True):
Session=current_app.config['dbsession']
with Timer(key="query_portalsstats", verbose=True):
results=[row2dict(r) for r in Session.query(Portal, Portal.snapshot_count,Portal.first_snapshot, Portal.last_snapshot, Portal.datasetcount, Portal.resourcecount)]
df=pd.DataFrame(results)
with Timer(key="plot_portalsstats", verbose=True):
p= portalsScatter(df)
script, div= components(p)
js_resources = INLINE.render_js()
css_resources = INLINE.render_css()
return render("odpw_portals_stats.jinja",
plot_script=script,
plot_div=div,
js_resources=js_resources,
css_resources=css_resources
)
#--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--#
### PORTAL
#--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--#
@cache.cached(timeout=60*60*24)
def getPortalInfos(Session, portalid, snapshot):
with Timer(key="get_getPortalInfos", verbose=True):
with Timer(key="query_getPortalInfos", verbose=True):
snapshots=[i[0] for i in Session.query(PortalSnapshot.snapshot).filter(PortalSnapshot.portalid==portalid).all()]
p=getPreviousWeek(snapshot)
p=p if p in snapshots else None
n=getNextWeek(snapshot)
n=n if n in snapshots else None
data={'snapshots':{'list':snapshots,'prev':p, 'next': n}}
return data
@ui.route('/portal', methods=['GET'])
@cache.cached(timeout=60*60*24)
def portaldash():
with Timer(key="get_portaldash", verbose=True):
data={}
cursn=getSnapshotfromTime(datetime.datetime.now())
Session=current_app.config['dbsession']
with Timer(key="query_portaldash", verbose=True):
data['portals']= [ row2dict(r) for r in Session.query(Portal).all()]
return render("odpw_portaldash.jinja", data=data, snapshot=cursn)
def getResourceInfo(session,dbc, portalid, snapshot, orga=None):
with Timer(key="getResourceInfo",verbose=True):
data={}
with Timer(key="query_getResourceInfoValid", verbose=True):
data['valid']={}
for valid in validURLDist(session,snapshot, portalid=portalid, orga=orga):
data['valid'][valid[0]]=valid[1]
with Timer(key="query_getResourceInfoStatus", verbose=True):
data['status']={}
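# Run the status-code distribution query through a per-portal/snapshot view
# name via withView(); if the query still takes more than 5 seconds, it is
# materialized with createView() so later requests for the same portal and
# snapshot are served faster.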
if not orga:
viewName = "view_{}_{}_{}".format('resstatus', portalid, snapshot)
else:
viewName = "view_{}_{}_{}_{}".format('resstatus', portalid, snapshot, orga)
qorg = statusCodeDist(session,snapshot,portalid=portalid, orga=orga)
q = withView(qorg, viewName, session, dbc)
start = time.time()
for res in q:
data['status'][res[0]]=res[1]
end = time.time()
if (end - start) > 5:
print("Create View {}".format(viewName))
createView(qorg, viewName, session)
return {'resourcesInfo':data}
# @ui.route('/portal/<portalid>/<int:snapshot>', methods=['GET'])
# @cache.cached(timeout=60*60*24)
# def portal(snapshot, portalid):
# with Timer(key="get_portal",verbose=True):
#
# Session=current_app.config['dbsession']
# dbc=current_app.config['dbc']
# data=getPortalInfos(Session,portalid,snapshot)
#
# with Timer(key="query_portal",verbose=True):
# r=Session.query(Portal, Portal.datasetcount, Portal.resourcecount).filter(Portal.id==portalid)
# for P in r:
# data.update(row2dict(P[0]))
# data['datasets']=P[1]
# data['resources']=P[2]
# with Timer(key="query_portal_agg", verbose=True):
# data.update(aggregatePortalInfo(Session,portalid,snapshot,dbc))
# return render("odpw_portal.jinja", snapshot=snapshot, portalid=portalid,data=data)
@ui.route('/portal/<portalid>/', methods=['GET'])
@ui.route('/portal/<portalid>/<int:snapshot>', methods=['GET'])
@cache.cached(timeout=60*60*24)
def portal(portalid,snapshot=getSnapshotfromTime(datetime.datetime.now())):
with Timer(key="get_portal",verbose=True):
current_sn = snapshot
Session=current_app.config['dbsession']
data=getPortalInfos(Session,portalid,snapshot)
dynamicityEnabled = current_app.config.get('dynamicity', False)
with Timer(key="query_portal",verbose=True):
q = Session.query(Portal).filter(Portal.id == portalid) \
.join(PortalSnapshotQuality, PortalSnapshotQuality.portalid == Portal.id) \
.filter(PortalSnapshotQuality.snapshot == snapshot) \
.join(PortalSnapshot, PortalSnapshot.portalid == Portal.id) \
.filter(PortalSnapshot.snapshot == snapshot) \
.add_entity(PortalSnapshot) \
.add_entity(PortalSnapshotQuality)
if dynamicityEnabled:
q = q.join(PortalSnapshotDynamicity, PortalSnapshotDynamicity.portalid == Portal.id) \
.filter(PortalSnapshotDynamicity.snapshot == snapshot) \
.add_entity(PortalSnapshotDynamicity)
r = q.first()
while r is None:
snapshot= getPreviousWeek(snapshot)
q = Session.query(Portal).filter(Portal.id == portalid) \
.join(PortalSnapshotQuality, PortalSnapshotQuality.portalid == Portal.id) \
.filter(PortalSnapshotQuality.snapshot == snapshot) \
.join(PortalSnapshot, PortalSnapshot.portalid == Portal.id) \
.filter(PortalSnapshot.snapshot == snapshot) \
.add_entity(PortalSnapshot) \
.add_entity(PortalSnapshotQuality)
if dynamicityEnabled:
q = q.join(PortalSnapshotDynamicity, PortalSnapshotDynamicity.portalid == Portal.id) \
.filter(PortalSnapshotDynamicity.snapshot == snapshot) \
.add_entity(PortalSnapshotDynamicity)
r = q.first()
data['portal'] = row2dict(r[0])
data['fetchInfo'] = row2dict(r[1])
data['fetchInfo']['duration']=data['fetchInfo']['end']-data['fetchInfo']['start']
if dynamicityEnabled:
data['dynamicity'] = row2dict(r[3])
data['quality'] = row2dict(r[2])
#with Timer(key="query_portal_agg", verbose=True):
# data.update(aggregatePortalInfo(Session,portalid,snapshot,dbc))
return render("odpw_portal.jinja", snapshot=current_sn, portalid=portalid,data=data)
@ui.route('/portal/<portalid>/<int:snapshot>/report', methods=['GET'])
#@cache.cached(timeout=60*60*24)
def portalreport(portalid,snapshot=getSnapshotfromTime(datetime.datetime.now())):
with Timer(key="get_portal",verbose=True):
Session=current_app.config['dbsession']
data=getPortalInfos(Session,portalid,snapshot)
with Timer(key="query_portalreport",verbose=True):
q = Session.query(Dataset.organisation) \
.filter(Dataset.portalid == portalid) \
.filter(Dataset.snapshot == snapshot).distinct(Dataset.organisation)
data['organisations']=[ row2dict(res) for res in q]
return render("odpw_portal_report.jinja", snapshot=snapshot, portalid=portalid,data=data)
@ui.route('/portal/<portalid>/<int:snapshot>/report/<orga>', methods=['GET'])
#@cache.cached(timeout=60*60*24)
def portalOrgareport(portalid,orga,snapshot=getSnapshotfromTime(datetime.datetime.now())):
with Timer(key="get_portal",verbose=True):
Session=current_app.config['dbsession']
data=getPortalInfos(Session,portalid,snapshot)
with Timer(key="query_portalreport",verbose=True):
portal=Session.query(Portal).filter(Portal.id==portalid).first()
data['contacts']=contactPerOrga(Session, portal, snapshot, orga)
return render("odpw_portal_report_contacts.jinja", snapshot=snapshot, portalid=portalid,data=data,organisation=orga)
@ui.route('/portal/<portalid>/<int:snapshot>/report/<contact>/<orga>', methods=['GET'])
def portalOrgareportContact(portalid,contact,orga,snapshot=getSnapshotfromTime(datetime.datetime.now())):
print contact
Session = current_app.config['dbsession']
data = getPortalInfos(Session, portalid, snapshot)
with Timer(key="query_portalreport", verbose=True):
portal = Session.query(Portal).filter(Portal.id == portalid).first()
data['contact_report'] = orgaReport(Session, portal, snapshot, orga, contact=contact)
return render("odpw_portal_report_contact.jinja", snapshot=snapshot, portalid=portalid, data=data,
organisation=orga, contact=contact)
@ui.route('/portal/<portalid>/<int:snapshot>/resource/<path:uri>', methods=['GET'])
@cache.cached(timeout=60*60*24)
def resourceInfo(snapshot, portalid, uri):
with Timer(key="get_resourceInfo",verbose=True):
#print snapshot,portalid,uri
Session=current_app.config['dbsession']
dbc = current_app.config['dbc']
data=getPortalInfos(Session, portalid, snapshot)
with Timer(key="query_resources",verbose=True):
viewName = "view_{}_{}_{}".format('resinfo', portalid, snapshot)
qorg = getResourceInfos(Session, snapshot, portalid)
q = withView(qorg, viewName, Session, dbc)
start = time.time()
data['resources'] = [row2dict(r) for r in q.all()]
end = time.time()
if (end - start) > 5:
print("Create View {}".format(viewName))
try:
createView(qorg, viewName, Session)
except Exception as e:
if 'already exists' in e.message:
pass
else:
raise e
with Timer(key="query_resourceInfo", verbose=True):
q = Session.query(ResourceInfo) \
.filter(ResourceInfo.uri == uri)
#print q
data['resourceInfo'] = [row2dict(r) for r in q.all()]
for r in data['resourceInfo']:
if 'header' in r:
if r['header'] is None:
r['header']=""
else:
#print type(r['header']),r['header'],r
r['header'] = ast.literal_eval(str(r['header']))
return render("odpw_portal_resource.jinja", snapshot=snapshot, portalid=portalid, uri=uri, data=data)
@ui.route('/portal/<portalid>/linkcheck/csv', methods=['GET'])
def portalLinkCheckCSV(portalid):
with Timer(key="get_portalLinkCheckCSV",verbose=True):
si = StringIO.StringIO()
cw = csv.writer(si)
snapshot = getCurrentSnapshot()
Session=current_app.config['dbsession']
data=getPortalInfos(Session,portalid,snapshot)
with Timer(key="query_portalorgas",verbose=True):
q = Session.query(Dataset.organisation) \
.filter(Dataset.portalid == portalid) \
.filter(Dataset.snapshot == snapshot).distinct(Dataset.organisation)
data['organisations'] = [row2dict(res) for res in q]
for o in data['organisations']:
orga = o['organisation']
# with Timer(key="query_orga-emails", verbose=True):
# portal=Session.query(Portal).filter(Portal.id==portalid).first()
# # print('portal: ', portal, 'snapshot: ', snapshot, 'orga: ', orga)
# data['contacts'] = contactPerOrga(Session, portal, snapshot, orga)
# for cont in data['contacts']:
linkcheck = 'https://tools.adequate.at' + url_for('.orga_resources', portalid=portalid, snapshot=snapshot, orga=orga)
cw.writerow([orga, linkcheck])
output = make_response(si.getvalue())
output.headers["Content-Disposition"] = "attachment; filename=export.csv"
output.headers["Content-type"] = "text/csv"
return output
@ui.route('/portal/<portalid>/linkcheck/', methods=['GET'])
def portalDynamicLinkCheck(portalid):
snapshot = getCurrentSnapshot()
return portalLinkCheck(snapshot, portalid)
@ui.route('/portal/<portalid>/<int:snapshot>/linkcheck/', methods=['GET'])
@cache.cached(timeout=60*60*24)
def portalLinkCheck(snapshot, portalid):
Session = current_app.config['dbsession']
data = getPortalInfos(Session, portalid, snapshot)
q = Session.query(Dataset.organisation) \
.filter(Dataset.portalid == portalid) \
.filter(Dataset.snapshot == snapshot).distinct(Dataset.organisation)
data['organisations'] = [row2dict(res) for res in q]
return render("odpw_portal_linkchecker.jinja", snapshot=snapshot, portalid=portalid, data=data)
@ui.route('/portal/<portalid>/<int:snapshot>/resources', methods=['GET'])
@cache.cached(timeout=60*60*24)
def portalRes(portalid, snapshot=None):
if not snapshot:
snapshot = getCurrentSnapshot()
Session = current_app.config['dbsession']
data={}
data.update(getPortalInfos(Session, portalid, snapshot))
return render("odpw_portal_resources.jinja", data=data,snapshot=snapshot, portalid=portalid)
def getDatasetURI(datasetid, portalid):
session=current_app.config['dbsession']
p = session.query(Portal).filter(Portal.id == portalid).first()
if p.software == 'CKAN':
uri = '{0}/dataset/{1}'.format(p.apiuri.rstrip('/'), datasetid)
elif p.software == 'Socrata':
uri = '{0}/dataset/{1}'.format(p.uri.rstrip('/'), datasetid)
elif p.software == 'OpenDataSoft':
uri = '{0}/explore/dataset/{1}'.format(p.uri.rstrip('/'), datasetid)
else:
uri = datasetid
return uri
@ui.route('/portal/<portalid>/<int:snapshot>/linkcheck/<orga>', methods=['GET'])
@cache.cached(timeout=60*60*24)
def orga_resources(portalid, snapshot, orga):
Session = current_app.config['dbsession']
data = {}
data.update(getPortalInfos(Session, portalid, snapshot))
return render("odpw_portal_linkchecker_orga.jinja", data=data, snapshot=snapshot, portalid=portalid, organisation=orga)
@ui.route('/portal/<portalid>/<int:snapshot>/linkcheck/<orga>/body', methods=['GET'])
@cache.cached(timeout=60*60*24)
def orga_resource(portalid, snapshot, orga):
with Timer(key="get_orga_resource",verbose=True):
Session=current_app.config['dbsession']
dbc=current_app.config['dbc']
data = getResourceInfo(Session, dbc, portalid, snapshot, orga)
q = getResourceInfos(Session, snapshot, portalid, orga)
data['resList'] = []
for i in q:
dataset_id = i[1]
dataset_title = i[2]
orig_link = getDatasetURI(dataset_id, portalid)
data['resList'].append({'uri': row2dict(i[0]), 'dataset': {'uri': orig_link, 'title': dataset_title}})
data.update(getPortalInfos(Session,portalid,snapshot))
r=current_app.config['dbsession'].query(Portal.resourcecount).filter(Portal.id==portalid)
for P in r:
data['resources']=P[0]
return render("odpw_portal_resources_list.jinja", data=data, snapshot=snapshot, portalid=portalid)
@ui.route('/portal/<portalid>/<int:snapshot>/resources/body', methods=['GET'])
@cache.cached(timeout=60*60*24)
def portalResBody(snapshot, portalid):
with Timer(key="get_portalRes",verbose=True):
Session=current_app.config['dbsession']
dbc=current_app.config['dbc']
with Timer(key="query_portalRes", verbose=True):
data=getResourceInfo(Session,dbc, portalid, snapshot)
with Timer(key="query_getMetaResource", verbose=True):
viewName = "view_{}_{}_{}".format('resinfo', portalid, snapshot)
qorg = getResourceInfos(Session,snapshot, portalid)
q = withView(qorg, viewName, Session, dbc)
start = time.time()
data['resList'] = []
for i in q:
dataset_id = i[1]
dataset_title = i[2]
orig_link = getDatasetURI(dataset_id, portalid)
data['resList'].append({'uri': row2dict(i[0]), 'dataset': {'uri': orig_link, 'title': dataset_title}})
end = time.time()
if (end - start) > 5:
print("Create View {}".format(viewName))
createView(qorg, viewName, Session)
data.update(getPortalInfos(Session,portalid,snapshot))
#data['portals']= [ row2dict(r) for r in Session.query(Portal).all()]
with Timer(key="query_portalResourceCount", verbose=True):
r=current_app.config['dbsession'].query(Portal.resourcecount).filter(Portal.id==portalid)
ps=[]
for P in r:
data['resources']=P[0]
return render("odpw_portal_resources_list.jinja", data=data, snapshot=snapshot, portalid=portalid)
@ui.route('/portal/<portalid>/<int:snapshot>/evolution', methods=['GET'])
@cache.cached(timeout=60*60*24)
def portalEvolution(snapshot, portalid):
with Timer(key="get_portalEvolution",verbose=True):
Session=current_app.config['dbsession']
data={}
with Timer(key="query_portalEvolution", verbose=True):
for R in Session.query(PortalSnapshot).filter(PortalSnapshot.portalid==portalid):
data[R.portalid+str(R.snapshot)]=row2dict(R)
for R in Session.query(PortalSnapshotQuality).filter(PortalSnapshotQuality.portalid==portalid):
data[R.portalid+str(R.snapshot)].update(row2dict(R))
df=pd.DataFrame([v for k,v in data.items()])
with Timer(key="plot_portalEvolution", verbose=True):
p=evolutionCharts(df)
script, div= components(p)
js_resources = INLINE.render_js()
css_resources = INLINE.render_css()
data = getPortalInfos(Session,portalid,snapshot)
return render("odpw_portal_evolution.jinja",
plot_script=script
,plot_div=div
,js_resources=js_resources
,css_resources=css_resources
,snapshot=snapshot
, portalid=portalid
, data=data
)
@ui.route('/portal/<portalid>/<int:snapshot>/dist/formats', methods=['GET'])
@cache.cached(timeout=60*60*24)
def portalFormats(snapshot, portalid):
with Timer(key="get_portalFormatDist",verbose=True):
Session=current_app.config['dbsession']
dbc = current_app.config['dbc']
data = getPortalInfos(Session, portalid, snapshot)
with Timer(key="query_portalFormatDist", verbose=True):
data['portals']= [ row2dict(r) for r in Session.query(Portal).all()]
data.update(aggregatePortalInfo(Session,portalid,snapshot, dbc, limit=None))
return render("odpw_portal_dist.jinja", data=data, snapshot=snapshot, portalid=portalid)
@ui.route('/portal/<portalid>/<int:snapshot>/dist/licenses', methods=['GET'])
@cache.cached(timeout=60*60*24)
def portalLicenses(snapshot, portalid):
with Timer(key="get_portalLicenseDist",verbose=True):
Session=current_app.config['dbsession']
dbc=current_app.config['dbc']
data = getPortalInfos(Session,portalid,snapshot)
with Timer(key="query_portalLicenseDist", verbose=True):
data['portals']= [ row2dict(r) for r in Session.query(Portal).all()]
data.update(aggregatePortalInfo(Session,portalid,snapshot, dbc, limit=None))
return render("odpw_portal_dist.jinja", data=data, snapshot=snapshot, portalid=portalid)
@ui.route('/portal/<portalid>/<int:snapshot>/dist/organisations', methods=['GET'])
@cache.cached(timeout=60*60*24)
def portalOrganisations(snapshot, portalid):
with Timer(key="portalRes",verbose=True):
Session=current_app.config['dbsession']
dbc = current_app.config['dbc']
data = getPortalInfos(Session,portalid,snapshot)
data['portals']= [ row2dict(r) for r in Session.query(Portal).all()]
data.update(aggregatePortalInfo(Session,portalid,snapshot, dbc,limit=None))
return render("odpw_portal_dist.jinja", data=data, snapshot=snapshot, portalid=portalid)
def getPortalDatasets(Session, portalid,snapshot):
with Timer(key="getPortalDatasets",verbose=True):
return {"datasets": [ row2dict(r) for r in Session.query(Dataset.title, Dataset.id).filter(Dataset.portalid==portalid).filter(Dataset.snapshot==snapshot).all()]}
@ui.route('/portal/<portalid>/<int:snapshot>/dataset', methods=['GET'], defaults={'dataset': None})
@ui.route('/portal/<portalid>/dataset/<path:dataset>', methods=['GET'], defaults={'snapshot': None})
@ui.route('/portal/<portalid>/<int:snapshot>/dataset/<path:dataset>', methods=['GET'])
@cache.cached(timeout=60*60*24)
def portalDataset(snapshot, portalid, dataset):
with Timer(key="portalDataset",verbose=True):
if not snapshot:
snapshot = getCurrentSnapshot()
Session=current_app.config['dbsession']
data = getPortalInfos(Session,portalid,snapshot)
#data['portals']= [ row2dict(r) for r in Session.query(Portal).all()]
data.update(getPortalDatasets(Session, portalid, snapshot))
dd=None
if dataset:
for dt in data['datasets']:
if dt['id']==dataset:
dd=dt
break
with Timer(key="getPortalDatasets_datasetData",verbose=True):
r= Session.query(DatasetData).join(Dataset).filter(Dataset.id==dataset).join(DatasetQuality).add_entity(DatasetQuality).first()
data['datasetData']=row2dict(r)
software = Session.query(Portal.software).filter(Portal.id==portalid).first()[0]
if software == 'Socrata':
data['json']=data['datasetData']['raw']['view']
else:
data['json']=data['datasetData']['raw']
data['report']=dataset_reporter.report(r[0],r[1], software=None)
#with Timer(key="getSchemadotorgDatasets", verbose=True):
# q = Session.query(Portal).filter(Portal.id == portalid)
# p = q.first()
# schemadotorg = json.dumps(dcat_to_schemadotorg.convert(p, r[0]), indent=3)
with Timer(key="getPortalDatasets_resources",verbose=True):
q= Session.query(MetaResource,ResourceInfo).filter(MetaResource.md5==r[0].md5).outerjoin(ResourceInfo, and_( ResourceInfo.uri==MetaResource.uri,ResourceInfo.snapshot==snapshot))
data['resources']=[row2dict(r) for r in q.all()]
for r in data['resources']:
if 'header' in r and isinstance(r['header'], basestring):
r['header']=ast.literal_eval(r['header'])
with Timer(key="getPortalDatasets_versions",verbose=True):
q=Session.query(Dataset.md5, func.min(Dataset.snapshot).label('min'), func.max(Dataset.snapshot).label('max')).filter(Dataset.id==dataset).group_by(Dataset.md5)
r=[row2dict(r) for r in q.all()]
print r
versions={}
for i in r:
a=versions.setdefault(i['md5'],[])
a.append({'min':i['min'],'max':i['max']})
data['versions']=r
return render("odpw_portal_dataset.jinja", data=data, snapshot=snapshot, portalid=portalid, dataset=dd, qa=qa, error=errorStatus)
@ui.route('/portal/<portalid>/<int:snapshot>/csvw', methods=['GET'])
@cache.cached(timeout=60*60*24)
def portalCSVW(snapshot, portalid):
with Timer(key="portalCSVW",verbose=True):
Session=current_app.config['dbsession']
data = getPortalInfos(Session,portalid,snapshot)
data['portals']= [ row2dict(r) for r in Session.query(Portal).all()]
data.update(getPortalDatasets(Session, portalid, snapshot))
q = Session.query(Dataset, MetaResource)\
.filter(MetaResource.md5==Dataset.md5)\
.filter(MetaResource.format=='csv')\
.filter(Dataset.portalid==portalid)\
.filter(Dataset.snapshot==snapshot)
data['resources']=[row2dict(r) for r in q.all()]
return render("odpw_portal_csvw.jinja", data=data, snapshot=snapshot, portalid=portalid, qa=qa, error=errorStatus)
@ui.route('/portal/<portalid>/<int:snapshot>/quality', methods=['GET'])
@cache.cached(timeout=60*60*24)
def portalQuality(snapshot, portalid):
with Timer(key="portalQuality",verbose=True):
Session = current_app.config['dbsession']
df=portalSnapshotQualityDF(Session, portalid, snapshot)
q = Session.query(PortalSnapshotQuality) \
.filter(PortalSnapshotQuality.portalid == portalid) \
.filter(PortalSnapshotQuality.snapshot == snapshot)
qdata = None
for r in q:
qdata = row2dict(r)
break
d = []
datasets = int(qdata['datasets'])
for inD in qa:
for k, v in inD['metrics'].items():
k = k.lower()
# TODO what to do if metric has no value?
if qdata[k] != None and qdata[k] != 'None':
value = float(qdata[k])
perc = int(qdata[k + 'N']) / (datasets * 1.0) if datasets > 0 else 0
c = {'Metric': k, 'Dimension': inD['dimension'],
'dim_color': inD['color'], 'value': value, 'perc': perc}
c.update(v)
d.append(c)
data = getPortalInfos(Session,portalid,snapshot)
js_resources = INLINE.render_js()
css_resources = INLINE.render_css()
if d:
df= pd.DataFrame(d)
with Timer(key="dataDF", verbose=True) as t:
p= qualityChart(df)
script, div= components(p)
data['portals']= [ row2dict(r) for r in Session.query(Portal).all()]
data['quality']=qdata
return render("odpw_portal_quality.jinja",
plot_script=script
,plot_div=div
,js_resources=js_resources
,css_resources=css_resources
,snapshot=snapshot
, portalid=portalid
, data=data
, qa=qa
)
else:
return render("odpw_portal_quality.jinja", snapshot=snapshot, js_resources=js_resources, css_resources=css_resources, portalid=portalid, data=data, qa=qa)
@ui.route('/portal/<portalid>/<int:snapshot>/dynamics', methods=['GET'])
@cache.cached(timeout=60*60*24)
def portalDynamicy(snapshot, portalid):
Session = current_app.config['dbsession']
q= Session.query(PortalSnapshotDynamicity).filter(PortalSnapshotDynamicity.portalid==portalid).filter(PortalSnapshotDynamicity.snapshot<=snapshot)
data=[]
keys = [
'dindex',
'changefrequ',
'adddelratio',
'dyratio',
'staticRatio',
'addRatio',
'delRatio',
'updatedRatio'
]
for psd in q:
d = row2dict(psd)
for k in keys:
d[k] = psd.__getattribute__(k)
data.append(d)
df = pd.DataFrame(data)
with Timer(key="dynPlot", verbose=True) as t:
p = portalDynamicity(df)
script, div = components(p)
js_resources = INLINE.render_js()
css_resources = INLINE.render_css()
data = getPortalInfos(Session, portalid, snapshot)
data['portals'] = [row2dict(r) for r in Session.query(Portal).all()]
return render("odpw_portal_dynamicity.jinja",
plot_script=script
, plot_div=div
, js_resources=js_resources
, css_resources=css_resources
, snapshot=snapshot
, portalid=portalid
, data=data
)
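# Note: ``row2dict`` and ``getPortalInfos`` are imported from elsewhere in this
# package and are not shown here. A minimal sketch of what a ``row2dict`` helper
# typically looks like for SQLAlchemy results is given below purely for
# illustration; the real implementation may differ.
#
# def row2dict(row):
#     # Convert a SQLAlchemy result row (or mapped instance) into a plain dict.
#     if hasattr(row, '_asdict'):  # keyed tuples from multi-entity queries
#         return dict(row._asdict())
#     return {c.name: getattr(row, c.name) for c in row.__table__.columns}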
| gpl-3.0 |
saiwing-yeung/scikit-learn | sklearn/svm/tests/test_sparse.py | 35 | 13182 | from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
    # Test decision_function
    # Sanity check, test that decision_function implemented in python
    # returns the same as the one in libsvm
# multi class:
svc = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo')
clf = svc.fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
# This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
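# The tests above all follow the same pattern: fit one estimator on dense input
# and an identically-parametrised estimator on the sparse counterpart, then
# compare fitted attributes and predictions. A minimal standalone sketch of that
# idea (illustrative only, not an additional test):
#
# dense_clf = svm.SVC(kernel='linear').fit(X, Y)
# sparse_clf = svm.SVC(kernel='linear').fit(sparse.csr_matrix(X), Y)
# assert_array_equal(dense_clf.predict(T),
#                    sparse_clf.predict(sparse.csr_matrix(T)))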
| bsd-3-clause |
nomadcube/scikit-learn | sklearn/covariance/tests/test_covariance.py | 142 | 11068 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
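# For reference, the shrinkage exercised above follows the conventional
# convex-combination form (a sketch; see the ``shrunk_covariance`` implementation
# for the exact code):
#
#     shrunk_cov = (1 - shrinkage) * emp_cov + shrinkage * mu * np.identity(n_features)
#
# where ``mu = np.trace(emp_cov) / n_features``, so ``shrinkage=0`` recovers the
# empirical covariance and ``shrinkage=1`` gives a scaled identity matrix.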
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
nikitasingh981/scikit-learn | benchmarks/bench_covertype.py | 57 | 7378 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
# Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
# Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
# Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3),
'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
| bsd-3-clause |
xyguo/scikit-learn | sklearn/neighbors/nearest_centroid.py | 34 | 7347 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
from ..utils.multiclass import check_classification_targets
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
    metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
        The centroid for the samples corresponding to each class is the
        point from which the sum of the distances (according to the metric)
        of all samples that belong to that particular class is minimized.
        If the "manhattan" metric is provided, this centroid is the median;
        for all other metrics, the centroid is set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
        # If X is sparse and the metric is "manhattan", store it in csc
        # format, as this makes it easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
check_classification_targets(y)
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
# Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
        # Number of samples in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
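# Illustrative usage of centroid shrinkage (a sketch; the numbers are made up):
# increasing ``shrink_threshold`` moves each class centroid towards the overall
# data centroid and zeroes out features whose per-class deviation falls below
# the threshold, which can act as a simple form of feature selection.
#
# clf = NearestCentroid(shrink_threshold=0.2)
# clf.fit(X, y)  # X, y as in the class docstring above
# clf.predict([[-0.8, -1]])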
| bsd-3-clause |
cbertinato/pandas | pandas/tests/indexing/multiindex/test_sorted.py | 1 | 3368 | import numpy as np
from numpy.random import randn
from pandas import DataFrame, MultiIndex, Series
from pandas.util import testing as tm
class TestMultiIndexSorted:
def test_getitem_multilevel_index_tuple_not_sorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.loc[query_index, "data"]
xp_idx = MultiIndex.from_tuples([(0, 1, 0)], names=['a', 'b', 'c'])
xp = Series(['x'], index=xp_idx, name='data')
tm.assert_series_equal(rs, xp)
def test_getitem_slice_not_sorted(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
df = frame.sort_index(level=1).T
# buglet with int typechecking
result = df.iloc[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
tm.assert_frame_equal(result, expected)
def test_frame_getitem_not_sorted2(self):
        # GH 13431
df = DataFrame({'col1': ['b', 'd', 'b', 'a'],
'col2': [3, 1, 1, 2],
'data': ['one', 'two', 'three', 'four']})
df2 = df.set_index(['col1', 'col2'])
df2_original = df2.copy()
df2.index.set_levels(['b', 'd', 'a'], level='col1', inplace=True)
df2.index.set_codes([0, 1, 0, 2], level='col1', inplace=True)
assert not df2.index.is_lexsorted()
assert not df2.index.is_monotonic
assert df2_original.index.equals(df2.index)
expected = df2.sort_index()
assert expected.index.is_lexsorted()
assert expected.index.is_monotonic
result = df2.sort_index(level=0)
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_frame_getitem_not_sorted(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
df = frame.T
df['foo', 'four'] = 'foo'
arrays = [np.array(x) for x in zip(*df.columns.values)]
result = df['foo']
result2 = df.loc[:, 'foo']
expected = df.reindex(columns=df.columns[arrays[0] == 'foo'])
expected.columns = expected.columns.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
df = df.T
result = df.xs('foo')
result2 = df.loc['foo']
expected = df.reindex(df.index[arrays[0] == 'foo'])
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_series_getitem_not_sorted(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = zip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
arrays = [np.array(x) for x in zip(*index.values)]
result = s['qux']
result2 = s.loc['qux']
expected = s[arrays[0] == 'qux']
expected.index = expected.index.droplevel(0)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
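# The tests above exercise label-based indexing on MultiIndexes that are *not*
# lexsorted. As a rule of thumb (illustrative sketch, not an additional test),
# calling ``sort_index()`` first restores a monotonic, lexsorted index and makes
# slicing well-defined:
#
# df_sorted = df2.sort_index()
# assert df_sorted.index.is_lexsorted() and df_sorted.index.is_monotonic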
| bsd-3-clause |
amolkahat/pandas | pandas/io/excel.py | 1 | 62157 | """
Module for parsing to/from Excel files.
"""
import abc
import os
import warnings
# ---------------------------------------------------------------------
# ExcelFile class
from datetime import MINYEAR, date, datetime, time, timedelta
from distutils.version import LooseVersion
from io import UnsupportedOperation
from textwrap import fill
import numpy as np
import pandas._libs.json as json
import pandas.compat as compat
from pandas.compat import (
OrderedDict, add_metaclass, lrange, map, range, reduce, string_types, u,
zip
)
from pandas.core import config
from pandas.core.dtypes.common import (
is_bool, is_float, is_integer, is_list_like
)
from pandas.core.frame import DataFrame
from pandas.errors import EmptyDataError
from pandas.io.common import (
_NA_VALUES, _is_url, _stringify_path, _urlopen, _validate_header_arg,
get_filepath_or_buffer
)
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import TextParser
from pandas.util._decorators import Appender, deprecate_kwarg
__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]
_writer_extensions = ["xlsx", "xls", "xlsm"]
_writers = {}
_read_excel_doc = """
Read an Excel table into a pandas DataFrame
Parameters
----------
io : string, path object (pathlib.Path or py._path.local.LocalPath),
file-like object, pandas ExcelFile, or xlrd workbook.
The string could be a URL. Valid URL schemes include http, ftp, s3,
gcs, and file. For file URLs, a host is expected. For instance, a local
file could be file://localhost/path/to/workbook.xlsx
sheet_name : string, int, mixed list of strings/ints, or None, default 0
Strings are used for sheet names, Integers are used in zero-indexed
sheet positions.
Lists of strings/integers are used to request multiple sheets.
Specify None to get all sheets.
str|int -> DataFrame is returned.
list|None -> Dict of DataFrames is returned, with keys representing
sheets.
Available Cases
* Defaults to 0 -> 1st sheet as a DataFrame
* 1 -> 2nd sheet as a DataFrame
* "Sheet1" -> 1st sheet as a DataFrame
* [0,1,"Sheet5"] -> 1st, 2nd & 5th sheet as a dictionary of DataFrames
* None -> All sheets as a dictionary of DataFrames
sheetname : string, int, mixed list of strings/ints, or None, default 0
.. deprecated:: 0.21.0
Use `sheet_name` instead
header : int, list of ints, default 0
Row (0-indexed) to use for the column labels of the parsed
DataFrame. If a list of integers is passed those row positions will
be combined into a ``MultiIndex``. Use None if there is no header.
names : array-like, default None
List of column names to use. If file contains no header row,
then you should explicitly pass header=None
index_col : int, list of ints, default None
Column (0-indexed) to use as the row labels of the DataFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``. If a
subset of data is selected with ``usecols``, index_col
is based on the subset.
parse_cols : int or list, default None
.. deprecated:: 0.21.0
Pass in `usecols` instead.
usecols : int or list, default None
* If None then parse all columns,
* If int then indicates last column to be parsed
* If list of ints then indicates list of column numbers to be parsed
* If string then indicates comma separated list of Excel column letters and
column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
both sides.
squeeze : boolean, default False
If the parsed data only contains one column then return a Series
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `object` to preserve data as stored in Excel and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
.. versionadded:: 0.20.0
engine: string, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
true_values : list, default None
Values to consider as True
.. versionadded:: 0.19.0
false_values : list, default None
Values to consider as False
.. versionadded:: 0.19.0
skiprows : list-like
Rows to skip at the beginning (0-indexed)
nrows : int, default None
Number of rows to parse
.. versionadded:: 0.23.0
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted
as NaN: '""" + fill("', '".join(sorted(_NA_VALUES)), 70, subsequent_indent=" ") + """'.
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to.
verbose : boolean, default False
Indicate number of NA values placed in non-numeric columns
thousands : str, default None
Thousands separator for parsing string columns to numeric. Note that
this parameter is only necessary for columns stored as TEXT in Excel,
any numeric columns will automatically be parsed, regardless of display
format.
comment : str, default None
Comments out remainder of line. Pass a character or characters to this
argument to indicate comments in the input file. Any data between the
comment string and the end of the current line is ignored.
skip_footer : int, default 0
.. deprecated:: 0.23.0
Pass in `skipfooter` instead.
skipfooter : int, default 0
Rows at the end to skip (0-indexed)
convert_float : boolean, default True
convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally
Returns
-------
parsed : DataFrame or Dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheet_name
argument for more information on when a Dict of Dataframes is returned.
Examples
--------
An example DataFrame written to a local file
>>> df_out = pd.DataFrame([('string1', 1),
... ('string2', 2),
... ('string3', 3)],
... columns=['Name', 'Value'])
>>> df_out
Name Value
0 string1 1
1 string2 2
2 string3 3
>>> df_out.to_excel('tmp.xlsx')
The file can be read using the file name as string or an open file object:
>>> pd.read_excel('tmp.xlsx')
Name Value
0 string1 1
1 string2 2
2 string3 3
>>> pd.read_excel(open('tmp.xlsx','rb'))
Name Value
0 string1 1
1 string2 2
2 string3 3
Index and header can be specified via the `index_col` and `header` arguments
>>> pd.read_excel('tmp.xlsx', index_col=None, header=None)
0 1 2
0 NaN Name Value
1 0.0 string1 1
2 1.0 string2 2
3 2.0 string3 3
Column types are inferred but can be explicitly specified
>>> pd.read_excel('tmp.xlsx', dtype={'Name':str, 'Value':float})
Name Value
0 string1 1.0
1 string2 2.0
2 string3 3.0
True, False, and NA values, and thousands separators have defaults,
but can be explicitly specified, too. Supply the values you would like
as strings or lists of strings!
>>> pd.read_excel('tmp.xlsx',
... na_values=['string1', 'string2'])
Name Value
0 NaN 1
1 NaN 2
2 string3 3
Comment lines in the excel input file can be skipped using the `comment` kwarg
>>> df = pd.DataFrame({'a': ['1', '#2'], 'b': ['2', '3']})
>>> df.to_excel('tmp.xlsx', index=False)
>>> pd.read_excel('tmp.xlsx')
a b
0 1 2
1 #2 3
>>> pd.read_excel('tmp.xlsx', comment='#')
a b
0 1 2
"""
def register_writer(klass):
"""Adds engine to the excel writer registry. You must use this method to
integrate with ``to_excel``. Also adds config options for any new
``supported_extensions`` defined on the writer."""
if not compat.callable(klass):
raise ValueError("Can only register callables as engines")
engine_name = klass.engine
_writers[engine_name] = klass
for ext in klass.supported_extensions:
if ext.startswith('.'):
ext = ext[1:]
if ext not in _writer_extensions:
config.register_option("io.excel.{ext}.writer".format(ext=ext),
engine_name, validator=str)
_writer_extensions.append(ext)
def _get_default_writer(ext):
_default_writers = {'xlsx': 'openpyxl', 'xlsm': 'openpyxl', 'xls': 'xlwt'}
try:
import xlsxwriter # noqa
_default_writers['xlsx'] = 'xlsxwriter'
except ImportError:
pass
return _default_writers[ext]
def get_writer(engine_name):
try:
return _writers[engine_name]
except KeyError:
raise ValueError("No Excel writer '{engine}'"
.format(engine=engine_name))
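# Illustrative sketch of registering a third-party engine (hypothetical class
# name; the required interface -- ``engine``, ``supported_extensions``,
# ``write_cells`` and ``save`` -- is described in the comments inside
# ``ExcelWriter`` further below):
#
# class MyXlsxWriter(ExcelWriter):
#     engine = 'mywriter'
#     supported_extensions = ('.xlsx',)
#     ...
#
# register_writer(MyXlsxWriter)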
@Appender(_read_excel_doc)
@deprecate_kwarg("parse_cols", "usecols")
@deprecate_kwarg("skip_footer", "skipfooter")
def read_excel(io,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
**kwds):
# Can't use _deprecate_kwarg since sheetname=None has a special meaning
if is_integer(sheet_name) and sheet_name == 0 and 'sheetname' in kwds:
warnings.warn("The `sheetname` keyword is deprecated, use "
"`sheet_name` instead", FutureWarning, stacklevel=2)
sheet_name = kwds.pop("sheetname")
if 'sheet' in kwds:
raise TypeError("read_excel() got an unexpected keyword argument "
"`sheet`")
if not isinstance(io, ExcelFile):
io = ExcelFile(io, engine=engine)
return io.parse(
sheet_name=sheet_name,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=squeeze,
dtype=dtype,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
**kwds)
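# A couple of illustrative calls (assuming the same local 'tmp.xlsx' used in the
# docstring examples above):
#
# pd.read_excel('tmp.xlsx', usecols="A:C")    # parse only Excel columns A through C
# pd.read_excel('tmp.xlsx', sheet_name=None)  # dict of DataFrames, one per sheet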
class ExcelFile(object):
"""
Class for parsing tabular excel sheets into DataFrame objects.
Uses xlrd. See read_excel for more documentation
Parameters
----------
io : string, path object (pathlib.Path or py._path.local.LocalPath),
file-like object or xlrd workbook
If a string or path object, expected to be a path to xls or xlsx file
engine: string, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd
"""
def __init__(self, io, **kwds):
err_msg = "Install xlrd >= 0.9.0 for Excel support"
try:
import xlrd
except ImportError:
raise ImportError(err_msg)
else:
ver = tuple(map(int, xlrd.__VERSION__.split(".")[:2]))
if ver < (0, 9): # pragma: no cover
raise ImportError(err_msg +
". Current version " + xlrd.__VERSION__)
# could be a str, ExcelFile, Book, etc.
self.io = io
# Always a string
self._io = _stringify_path(io)
engine = kwds.pop('engine', None)
if engine is not None and engine != 'xlrd':
raise ValueError("Unknown engine: {engine}".format(engine=engine))
# If io is a url, want to keep the data as bytes so can't pass
# to get_filepath_or_buffer()
if _is_url(self._io):
io = _urlopen(self._io)
elif not isinstance(self.io, (ExcelFile, xlrd.Book)):
io, _, _, _ = get_filepath_or_buffer(self._io)
if engine == 'xlrd' and isinstance(io, xlrd.Book):
self.book = io
elif not isinstance(io, xlrd.Book) and hasattr(io, "read"):
# N.B. xlrd.Book has a read attribute too
if hasattr(io, 'seek'):
try:
# GH 19779
io.seek(0)
except UnsupportedOperation:
# HTTPResponse does not support seek()
# GH 20434
pass
data = io.read()
self.book = xlrd.open_workbook(file_contents=data)
elif isinstance(self._io, compat.string_types):
self.book = xlrd.open_workbook(self._io)
else:
raise ValueError('Must explicitly set engine if not passing in'
' buffer or path for io.')
def __fspath__(self):
return self._io
def parse(self,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
converters=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
**kwds):
"""
Parse specified sheet(s) into a DataFrame
Equivalent to read_excel(ExcelFile, ...) See the read_excel
docstring for more info on accepted parameters
"""
# Can't use _deprecate_kwarg since sheetname=None has a special meaning
if is_integer(sheet_name) and sheet_name == 0 and 'sheetname' in kwds:
warnings.warn("The `sheetname` keyword is deprecated, use "
"`sheet_name` instead", FutureWarning, stacklevel=2)
sheet_name = kwds.pop("sheetname")
elif 'sheetname' in kwds:
raise TypeError("Cannot specify both `sheet_name` "
"and `sheetname`. Use just `sheet_name`")
return self._parse_excel(sheet_name=sheet_name,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=squeeze,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
**kwds)
def _should_parse(self, i, usecols):
def _range2cols(areas):
"""
Convert comma separated list of column names and column ranges to a
list of 0-based column indexes.
>>> _range2cols('A:E')
[0, 1, 2, 3, 4]
>>> _range2cols('A,C,Z:AB')
[0, 2, 25, 26, 27]
"""
def _excel2num(x):
"Convert Excel column name like 'AB' to 0-based column index"
return reduce(lambda s, a: s * 26 + ord(a) - ord('A') + 1,
x.upper().strip(), 0) - 1
cols = []
for rng in areas.split(','):
if ':' in rng:
rng = rng.split(':')
cols += lrange(_excel2num(rng[0]), _excel2num(rng[1]) + 1)
else:
cols.append(_excel2num(rng))
return cols
if isinstance(usecols, int):
return i <= usecols
elif isinstance(usecols, compat.string_types):
return i in _range2cols(usecols)
else:
return i in usecols
def _parse_excel(self,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
dtype=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
verbose=False,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
**kwds):
_validate_header_arg(header)
if 'chunksize' in kwds:
raise NotImplementedError("chunksize keyword of read_excel "
"is not implemented")
if parse_dates is True and index_col is None:
warnings.warn("The 'parse_dates=True' keyword of read_excel was "
"provided without an 'index_col' keyword value.")
import xlrd
from xlrd import (xldate, XL_CELL_DATE,
XL_CELL_ERROR, XL_CELL_BOOLEAN,
XL_CELL_NUMBER)
epoch1904 = self.book.datemode
def _parse_cell(cell_contents, cell_typ):
"""converts the contents of the cell into a pandas
appropriate object"""
if cell_typ == XL_CELL_DATE:
if xlrd_0_9_3:
# Use the newer xlrd datetime handling.
try:
cell_contents = \
xldate.xldate_as_datetime(cell_contents,
epoch1904)
except OverflowError:
return cell_contents
# Excel doesn't distinguish between dates and time,
# so we treat dates on the epoch as times only.
# Also, Excel supports 1900 and 1904 epochs.
year = (cell_contents.timetuple())[0:3]
if ((not epoch1904 and year == (1899, 12, 31)) or
(epoch1904 and year == (1904, 1, 1))):
cell_contents = time(cell_contents.hour,
cell_contents.minute,
cell_contents.second,
cell_contents.microsecond)
else:
# Use the xlrd <= 0.9.2 date handling.
try:
dt = xldate.xldate_as_tuple(cell_contents, epoch1904)
except xldate.XLDateTooLarge:
return cell_contents
if dt[0] < MINYEAR:
cell_contents = time(*dt[3:])
else:
cell_contents = datetime(*dt)
elif cell_typ == XL_CELL_ERROR:
cell_contents = np.nan
elif cell_typ == XL_CELL_BOOLEAN:
cell_contents = bool(cell_contents)
elif convert_float and cell_typ == XL_CELL_NUMBER:
# GH5394 - Excel 'numbers' are always floats
# it's a minimal perf hit and less surprising
val = int(cell_contents)
if val == cell_contents:
cell_contents = val
return cell_contents
# xlrd >= 0.9.3 can return datetime objects directly.
if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"):
xlrd_0_9_3 = True
else:
xlrd_0_9_3 = False
ret_dict = False
# Keep sheetname to maintain backwards compatibility.
if isinstance(sheet_name, list):
sheets = sheet_name
ret_dict = True
elif sheet_name is None:
sheets = self.sheet_names
ret_dict = True
else:
sheets = [sheet_name]
# handle same-type duplicates.
sheets = list(OrderedDict.fromkeys(sheets).keys())
output = OrderedDict()
for asheetname in sheets:
if verbose:
print("Reading sheet {sheet}".format(sheet=asheetname))
if isinstance(asheetname, compat.string_types):
sheet = self.book.sheet_by_name(asheetname)
else: # assume an integer if not a string
sheet = self.book.sheet_by_index(asheetname)
data = []
should_parse = {}
for i in range(sheet.nrows):
row = []
for j, (value, typ) in enumerate(zip(sheet.row_values(i),
sheet.row_types(i))):
if usecols is not None and j not in should_parse:
should_parse[j] = self._should_parse(j, usecols)
if usecols is None or should_parse[j]:
row.append(_parse_cell(value, typ))
data.append(row)
if sheet.nrows == 0:
output[asheetname] = DataFrame()
continue
if is_list_like(header) and len(header) == 1:
header = header[0]
# forward fill and pull out names for MultiIndex column
header_names = None
if header is not None:
if is_list_like(header):
header_names = []
control_row = [True] * len(data[0])
for row in header:
if is_integer(skiprows):
row += skiprows
data[row], control_row = _fill_mi_header(
data[row], control_row)
header_name, data[row] = _pop_header_name(
data[row], index_col)
header_names.append(header_name)
else:
data[header] = _trim_excel_header(data[header])
if is_list_like(index_col):
# forward fill values for MultiIndex index
if not is_list_like(header):
offset = 1 + header
else:
offset = 1 + max(header)
for col in index_col:
last = data[offset][col]
for row in range(offset + 1, len(data)):
if data[row][col] == '' or data[row][col] is None:
data[row][col] = last
else:
last = data[row][col]
has_index_names = is_list_like(header) and len(header) > 1
# GH 12292 : error when read one empty column from excel file
try:
parser = TextParser(data,
header=header,
index_col=index_col,
has_index_names=has_index_names,
squeeze=squeeze,
dtype=dtype,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
**kwds)
output[asheetname] = parser.read(nrows=nrows)
if names is not None:
output[asheetname].columns = names
if not squeeze or isinstance(output[asheetname], DataFrame):
output[asheetname].columns = output[
asheetname].columns.set_names(header_names)
except EmptyDataError:
# No Data, return an empty DataFrame
output[asheetname] = DataFrame()
if ret_dict:
return output
else:
return output[asheetname]
@property
def sheet_names(self):
return self.book.sheet_names()
def close(self):
"""close io if necessary"""
if hasattr(self.io, 'close'):
self.io.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
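# Illustrative usage of ExcelFile as a context manager (the file name is
# hypothetical):
#
# with ExcelFile('path_to_file.xlsx') as xls:
#     frames = {name: xls.parse(name) for name in xls.sheet_names}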
def _validate_freeze_panes(freeze_panes):
if freeze_panes is not None:
if (
len(freeze_panes) == 2 and
all(isinstance(item, int) for item in freeze_panes)
):
return True
raise ValueError("freeze_panes must be of form (row, column)"
" where row and column are integers")
# freeze_panes wasn't specified, return False so it won't be applied
# to output sheet
return False
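# For example, ``df.to_excel(writer, freeze_panes=(1, 0))`` keeps the first
# (header) row visible while scrolling, and ``(1, 1)`` would additionally freeze
# the first column. (Illustrative note; the validation above only checks the
# (row, column) form.)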
def _trim_excel_header(row):
# trim header row so auto-index inference works
# xlrd uses '' , openpyxl None
while len(row) > 0 and (row[0] == '' or row[0] is None):
row = row[1:]
return row
def _fill_mi_header(row, control_row):
"""Forward fills blank entries in row, but only inside the same parent index
Used for creating headers in Multiindex.
Parameters
----------
row : list
List of items in a single row.
control_row : list of boolean
Helps to determine if particular column is in same parent index as the
previous value. Used to stop propagation of empty cells between
different indexes.
    Returns
    -------
    Returns changed row and control_row.
"""
last = row[0]
for i in range(1, len(row)):
if not control_row[i]:
last = row[i]
if row[i] == '' or row[i] is None:
row[i] = last
else:
control_row[i] = False
last = row[i]
return row, control_row
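# Editor's note -- illustrative example (hypothetical values), not part of the original source:
#   >>> _fill_mi_header(['a', '', 'b', ''], [True, True, True, True])
#   (['a', 'a', 'b', 'b'], [True, True, False, True])
# the blank after 'a' (and after 'b') is forward filled, and control_row marks where a new
# parent label started so later header rows stop propagating across that boundary.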
# fill blank if index_col not None
def _pop_header_name(row, index_col):
""" (header, new_data) for header rows in MultiIndex parsing"""
none_fill = lambda x: None if x == '' else x
if index_col is None:
# no index col specified, trim data for inference path
return none_fill(row[0]), row[1:]
else:
# pop out header name and fill w/ blank
i = index_col if not is_list_like(index_col) else max(index_col)
return none_fill(row[i]), row[:i] + [''] + row[i + 1:]
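# Editor's note -- illustrative example (hypothetical values), not part of the original source:
#   >>> _pop_header_name(['idx', 'a', 'b'], index_col=0)
#   ('idx', ['', 'a', 'b'])
#   >>> _pop_header_name(['', 'a', 'b'], index_col=None)
#   (None, ['a', 'b'])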
@add_metaclass(abc.ABCMeta)
class ExcelWriter(object):
"""
Class for writing DataFrame objects into excel sheets, default is to use
xlwt for xls, openpyxl for xlsx. See DataFrame.to_excel for typical usage.
Parameters
----------
path : string
Path to xls or xlsx file.
engine : string (optional)
Engine to use for writing. If None, defaults to
``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
argument.
date_format : string, default None
Format string for dates written into Excel files (e.g. 'YYYY-MM-DD')
datetime_format : string, default None
Format string for datetime objects written into Excel files
(e.g. 'YYYY-MM-DD HH:MM:SS')
mode : {'w' or 'a'}, default 'w'
File mode to use (write or append).
.. versionadded:: 0.24.0
Notes
-----
None of the methods and properties are considered public.
For compatibility with CSV writers, ExcelWriter serializes lists
and dicts to strings before writing.
Examples
--------
Default usage:
>>> with ExcelWriter('path_to_file.xlsx') as writer:
... df.to_excel(writer)
To write to separate sheets in a single file:
>>> with ExcelWriter('path_to_file.xlsx') as writer:
... df1.to_excel(writer, sheet_name='Sheet1')
... df2.to_excel(writer, sheet_name='Sheet2')
You can set the date format or datetime format:
>>> with ExcelWriter('path_to_file.xlsx',
date_format='YYYY-MM-DD',
datetime_format='YYYY-MM-DD HH:MM:SS') as writer:
... df.to_excel(writer)
You can also append to an existing Excel file:
>>> with ExcelWriter('path_to_file.xlsx', mode='a') as writer:
... df.to_excel(writer, sheet_name='Sheet3')
Attributes
----------
None
Methods
-------
None
"""
# Defining an ExcelWriter implementation (see abstract methods for more...)
# - Mandatory
# - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
# --> called to write additional DataFrames to disk
# - ``supported_extensions`` (tuple of supported extensions), used to
# check that engine supports the given extension.
# - ``engine`` - string that gives the engine name. Necessary to
# instantiate class directly and bypass ``ExcelWriterMeta`` engine
# lookup.
# - ``save(self)`` --> called to save file to disk
# - Mostly mandatory (i.e. should at least exist)
# - book, cur_sheet, path
# - Optional:
# - ``__init__(self, path, engine=None, **kwargs)`` --> always called
# with path as first argument.
# You also need to register the class with ``register_writer()``.
# Technically, ExcelWriter implementations don't need to subclass
# ExcelWriter.
def __new__(cls, path, engine=None, **kwargs):
# only switch class if generic(ExcelWriter)
if issubclass(cls, ExcelWriter):
if engine is None or (isinstance(engine, string_types) and
engine == 'auto'):
if isinstance(path, string_types):
ext = os.path.splitext(path)[-1][1:]
else:
ext = 'xlsx'
try:
engine = config.get_option('io.excel.{ext}.writer'
.format(ext=ext))
if engine == 'auto':
engine = _get_default_writer(ext)
except KeyError:
error = ValueError("No engine for filetype: '{ext}'"
.format(ext=ext))
raise error
cls = get_writer(engine)
return object.__new__(cls)
# declare external properties you can count on
book = None
curr_sheet = None
path = None
@abc.abstractproperty
def supported_extensions(self):
"extensions that writer engine supports"
pass
@abc.abstractproperty
def engine(self):
"name of engine"
pass
@abc.abstractmethod
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
freeze_panes=None):
"""
        Write given formatted cells into an Excel sheet
Parameters
----------
cells : generator
cell of formatted data to save to Excel sheet
sheet_name : string, default None
Name of Excel sheet, if None, then use self.cur_sheet
startrow: upper left cell row to dump data frame
startcol: upper left cell column to dump data frame
freeze_panes: integer tuple of length 2
contains the bottom-most row and right-most column to freeze
"""
pass
@abc.abstractmethod
def save(self):
"""
Save workbook to disk.
"""
pass
def __init__(self, path, engine=None,
date_format=None, datetime_format=None, mode='w',
**engine_kwargs):
# validate that this engine can handle the extension
if isinstance(path, string_types):
ext = os.path.splitext(path)[-1]
else:
ext = 'xls' if engine == 'xlwt' else 'xlsx'
self.check_extension(ext)
self.path = path
self.sheets = {}
self.cur_sheet = None
if date_format is None:
self.date_format = 'YYYY-MM-DD'
else:
self.date_format = date_format
if datetime_format is None:
self.datetime_format = 'YYYY-MM-DD HH:MM:SS'
else:
self.datetime_format = datetime_format
self.mode = mode
def __fspath__(self):
return _stringify_path(self.path)
def _get_sheet_name(self, sheet_name):
if sheet_name is None:
sheet_name = self.cur_sheet
if sheet_name is None: # pragma: no cover
raise ValueError('Must pass explicit sheet_name or set '
'cur_sheet property')
return sheet_name
def _value_with_fmt(self, val):
"""Convert numpy types to Python types for the Excel writers.
Parameters
----------
val : object
Value to be written into cells
Returns
-------
Tuple with the first element being the converted value and the second
being an optional format
"""
fmt = None
if is_integer(val):
val = int(val)
elif is_float(val):
val = float(val)
elif is_bool(val):
val = bool(val)
elif isinstance(val, datetime):
fmt = self.datetime_format
elif isinstance(val, date):
fmt = self.date_format
elif isinstance(val, timedelta):
val = val.total_seconds() / float(86400)
fmt = '0'
else:
val = compat.to_str(val)
return val, fmt
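    # Editor's note -- illustrative sketch of the conversion above, assuming the default
    # date/datetime formats set in __init__ (not part of the original pandas source):
    #   writer._value_with_fmt(np.int64(3))          -> (3, None)
    #   writer._value_with_fmt(datetime(2018, 1, 1)) -> (datetime(2018, 1, 1), 'YYYY-MM-DD HH:MM:SS')
    #   writer._value_with_fmt(timedelta(hours=12))  -> (0.5, '0')   # stored as a fraction of a day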
@classmethod
def check_extension(cls, ext):
"""checks that path's extension against the Writer's supported
extensions. If it isn't supported, raises UnsupportedFiletypeError."""
if ext.startswith('.'):
ext = ext[1:]
if not any(ext in extension for extension in cls.supported_extensions):
msg = (u("Invalid extension for engine '{engine}': '{ext}'")
.format(engine=pprint_thing(cls.engine),
ext=pprint_thing(ext)))
raise ValueError(msg)
else:
return True
# Allow use as a contextmanager
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""synonym for save, to make it more file-like"""
return self.save()
class _OpenpyxlWriter(ExcelWriter):
engine = 'openpyxl'
supported_extensions = ('.xlsx', '.xlsm')
def __init__(self, path, engine=None, mode='w', **engine_kwargs):
# Use the openpyxl module as the Excel writer.
from openpyxl.workbook import Workbook
super(_OpenpyxlWriter, self).__init__(path, mode=mode, **engine_kwargs)
if self.mode == 'a': # Load from existing workbook
from openpyxl import load_workbook
book = load_workbook(self.path)
self.book = book
else:
# Create workbook object with default optimized_write=True.
self.book = Workbook()
if self.book.worksheets:
try:
self.book.remove(self.book.worksheets[0])
except AttributeError:
# compat - for openpyxl <= 2.4
self.book.remove_sheet(self.book.worksheets[0])
def save(self):
"""
Save workbook to disk.
"""
return self.book.save(self.path)
@classmethod
def _convert_to_style(cls, style_dict):
"""
converts a style_dict to an openpyxl style object
Parameters
----------
style_dict: style dictionary to convert
"""
from openpyxl.style import Style
xls_style = Style()
for key, value in style_dict.items():
for nk, nv in value.items():
if key == "borders":
(xls_style.borders.__getattribute__(nk)
.__setattr__('border_style', nv))
else:
xls_style.__getattribute__(key).__setattr__(nk, nv)
return xls_style
@classmethod
def _convert_to_style_kwargs(cls, style_dict):
"""
Convert a style_dict to a set of kwargs suitable for initializing
or updating-on-copy an openpyxl v2 style object
Parameters
----------
style_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'font'
'fill'
'border' ('borders')
'alignment'
'number_format'
'protection'
Returns
-------
style_kwargs : dict
A dict with the same, normalized keys as ``style_dict`` but each
value has been replaced with a native openpyxl style object of the
appropriate class.
"""
_style_key_map = {
'borders': 'border',
}
style_kwargs = {}
for k, v in style_dict.items():
if k in _style_key_map:
k = _style_key_map[k]
_conv_to_x = getattr(cls, '_convert_to_{k}'.format(k=k),
lambda x: None)
new_v = _conv_to_x(v)
if new_v:
style_kwargs[k] = new_v
return style_kwargs
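    # Editor's sketch of the expected translation (hypothetical input, not part of the
    # original source): a dict such as
    #   {'font': {'bold': True}, 'borders': {'left': {'style': 'thin'}}}
    # becomes roughly
    #   {'font': Font(bold=True), 'border': Border(left=Side(style='thin'))}
    # i.e. the 'borders' synonym is normalized to 'border' and each value is replaced by
    # the matching openpyxl object via the _convert_to_* helpers below.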
@classmethod
def _convert_to_color(cls, color_spec):
"""
Convert ``color_spec`` to an openpyxl v2 Color object
Parameters
----------
color_spec : str, dict
A 32-bit ARGB hex string, or a dict with zero or more of the
following keys.
'rgb'
'indexed'
'auto'
'theme'
'tint'
'index'
'type'
Returns
-------
color : openpyxl.styles.Color
"""
from openpyxl.styles import Color
if isinstance(color_spec, str):
return Color(color_spec)
else:
return Color(**color_spec)
@classmethod
def _convert_to_font(cls, font_dict):
"""
Convert ``font_dict`` to an openpyxl v2 Font object
Parameters
----------
font_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'name'
'size' ('sz')
'bold' ('b')
'italic' ('i')
'underline' ('u')
'strikethrough' ('strike')
'color'
'vertAlign' ('vertalign')
'charset'
'scheme'
'family'
'outline'
'shadow'
'condense'
Returns
-------
font : openpyxl.styles.Font
"""
from openpyxl.styles import Font
_font_key_map = {
'sz': 'size',
'b': 'bold',
'i': 'italic',
'u': 'underline',
'strike': 'strikethrough',
'vertalign': 'vertAlign',
}
font_kwargs = {}
for k, v in font_dict.items():
if k in _font_key_map:
k = _font_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
font_kwargs[k] = v
return Font(**font_kwargs)
@classmethod
def _convert_to_stop(cls, stop_seq):
"""
Convert ``stop_seq`` to a list of openpyxl v2 Color objects,
suitable for initializing the ``GradientFill`` ``stop`` parameter.
Parameters
----------
stop_seq : iterable
An iterable that yields objects suitable for consumption by
``_convert_to_color``.
Returns
-------
stop : list of openpyxl.styles.Color
"""
return map(cls._convert_to_color, stop_seq)
@classmethod
def _convert_to_fill(cls, fill_dict):
"""
Convert ``fill_dict`` to an openpyxl v2 Fill object
Parameters
----------
fill_dict : dict
A dict with one or more of the following keys (or their synonyms),
'fill_type' ('patternType', 'patterntype')
'start_color' ('fgColor', 'fgcolor')
'end_color' ('bgColor', 'bgcolor')
or one or more of the following keys (or their synonyms).
'type' ('fill_type')
'degree'
'left'
'right'
'top'
'bottom'
'stop'
Returns
-------
fill : openpyxl.styles.Fill
"""
from openpyxl.styles import PatternFill, GradientFill
_pattern_fill_key_map = {
'patternType': 'fill_type',
'patterntype': 'fill_type',
'fgColor': 'start_color',
'fgcolor': 'start_color',
'bgColor': 'end_color',
'bgcolor': 'end_color',
}
_gradient_fill_key_map = {
'fill_type': 'type',
}
pfill_kwargs = {}
gfill_kwargs = {}
for k, v in fill_dict.items():
pk = gk = None
if k in _pattern_fill_key_map:
pk = _pattern_fill_key_map[k]
if k in _gradient_fill_key_map:
gk = _gradient_fill_key_map[k]
if pk in ['start_color', 'end_color']:
v = cls._convert_to_color(v)
if gk == 'stop':
v = cls._convert_to_stop(v)
if pk:
pfill_kwargs[pk] = v
elif gk:
gfill_kwargs[gk] = v
else:
pfill_kwargs[k] = v
gfill_kwargs[k] = v
try:
return PatternFill(**pfill_kwargs)
except TypeError:
return GradientFill(**gfill_kwargs)
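    # Editor's sketch (hypothetical input, not part of the original source): a pattern-style
    # dict such as {'patternType': 'solid', 'fgColor': {'rgb': 'FFEEEE00'}} maps to
    # PatternFill(fill_type='solid', start_color=Color(rgb='FFEEEE00')); dicts that only
    # carry gradient keys (e.g. 'degree', 'stop') make the PatternFill call fail and fall
    # through to GradientFill instead.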
@classmethod
def _convert_to_side(cls, side_spec):
"""
Convert ``side_spec`` to an openpyxl v2 Side object
Parameters
----------
side_spec : str, dict
A string specifying the border style, or a dict with zero or more
of the following keys (or their synonyms).
'style' ('border_style')
'color'
Returns
-------
side : openpyxl.styles.Side
"""
from openpyxl.styles import Side
_side_key_map = {
'border_style': 'style',
}
if isinstance(side_spec, str):
return Side(style=side_spec)
side_kwargs = {}
for k, v in side_spec.items():
if k in _side_key_map:
k = _side_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
side_kwargs[k] = v
return Side(**side_kwargs)
@classmethod
def _convert_to_border(cls, border_dict):
"""
Convert ``border_dict`` to an openpyxl v2 Border object
Parameters
----------
border_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'left'
'right'
'top'
'bottom'
'diagonal'
'diagonal_direction'
'vertical'
'horizontal'
'diagonalUp' ('diagonalup')
'diagonalDown' ('diagonaldown')
'outline'
Returns
-------
border : openpyxl.styles.Border
"""
from openpyxl.styles import Border
_border_key_map = {
'diagonalup': 'diagonalUp',
'diagonaldown': 'diagonalDown',
}
border_kwargs = {}
for k, v in border_dict.items():
if k in _border_key_map:
k = _border_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
if k in ['left', 'right', 'top', 'bottom', 'diagonal']:
v = cls._convert_to_side(v)
border_kwargs[k] = v
return Border(**border_kwargs)
@classmethod
def _convert_to_alignment(cls, alignment_dict):
"""
Convert ``alignment_dict`` to an openpyxl v2 Alignment object
Parameters
----------
alignment_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'horizontal'
'vertical'
'text_rotation'
'wrap_text'
'shrink_to_fit'
'indent'
Returns
-------
alignment : openpyxl.styles.Alignment
"""
from openpyxl.styles import Alignment
return Alignment(**alignment_dict)
@classmethod
def _convert_to_number_format(cls, number_format_dict):
"""
Convert ``number_format_dict`` to an openpyxl v2.1.0 number format
initializer.
Parameters
----------
number_format_dict : dict
A dict with zero or more of the following keys.
'format_code' : str
Returns
-------
number_format : str
"""
return number_format_dict['format_code']
@classmethod
def _convert_to_protection(cls, protection_dict):
"""
Convert ``protection_dict`` to an openpyxl v2 Protection object.
Parameters
----------
protection_dict : dict
A dict with zero or more of the following keys.
'locked'
'hidden'
Returns
-------
"""
from openpyxl.styles import Protection
return Protection(**protection_dict)
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
freeze_panes=None):
# Write the frame cells using openpyxl.
sheet_name = self._get_sheet_name(sheet_name)
_style_cache = {}
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.create_sheet()
wks.title = sheet_name
self.sheets[sheet_name] = wks
if _validate_freeze_panes(freeze_panes):
wks.freeze_panes = wks.cell(row=freeze_panes[0] + 1,
column=freeze_panes[1] + 1)
for cell in cells:
xcell = wks.cell(
row=startrow + cell.row + 1,
column=startcol + cell.col + 1
)
xcell.value, fmt = self._value_with_fmt(cell.val)
if fmt:
xcell.number_format = fmt
style_kwargs = {}
if cell.style:
key = str(cell.style)
style_kwargs = _style_cache.get(key)
if style_kwargs is None:
style_kwargs = self._convert_to_style_kwargs(cell.style)
_style_cache[key] = style_kwargs
if style_kwargs:
for k, v in style_kwargs.items():
setattr(xcell, k, v)
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_cells(
start_row=startrow + cell.row + 1,
start_column=startcol + cell.col + 1,
end_column=startcol + cell.mergeend + 1,
end_row=startrow + cell.mergestart + 1
)
# When cells are merged only the top-left cell is preserved
# The behaviour of the other cells in a merged range is
# undefined
if style_kwargs:
first_row = startrow + cell.row + 1
last_row = startrow + cell.mergestart + 1
first_col = startcol + cell.col + 1
last_col = startcol + cell.mergeend + 1
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row == first_row and col == first_col:
# Ignore first cell. It is already handled.
continue
xcell = wks.cell(column=col, row=row)
for k, v in style_kwargs.items():
setattr(xcell, k, v)
register_writer(_OpenpyxlWriter)
class _XlwtWriter(ExcelWriter):
engine = 'xlwt'
supported_extensions = ('.xls',)
def __init__(self, path, engine=None, encoding=None, mode='w',
**engine_kwargs):
# Use the xlwt module as the Excel writer.
import xlwt
engine_kwargs['engine'] = engine
if mode == 'a':
raise ValueError('Append mode is not supported with xlwt!')
super(_XlwtWriter, self).__init__(path, mode=mode, **engine_kwargs)
if encoding is None:
encoding = 'ascii'
self.book = xlwt.Workbook(encoding=encoding)
self.fm_datetime = xlwt.easyxf(num_format_str=self.datetime_format)
self.fm_date = xlwt.easyxf(num_format_str=self.date_format)
def save(self):
"""
Save workbook to disk.
"""
return self.book.save(self.path)
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
freeze_panes=None):
# Write the frame cells using xlwt.
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.add_sheet(sheet_name)
self.sheets[sheet_name] = wks
if _validate_freeze_panes(freeze_panes):
wks.set_panes_frozen(True)
wks.set_horz_split_pos(freeze_panes[0])
wks.set_vert_split_pos(freeze_panes[1])
style_dict = {}
for cell in cells:
val, fmt = self._value_with_fmt(cell.val)
stylekey = json.dumps(cell.style)
if fmt:
stylekey += fmt
if stylekey in style_dict:
style = style_dict[stylekey]
else:
style = self._convert_to_style(cell.style, fmt)
style_dict[stylekey] = style
if cell.mergestart is not None and cell.mergeend is not None:
wks.write_merge(startrow + cell.row,
startrow + cell.mergestart,
startcol + cell.col,
startcol + cell.mergeend,
val, style)
else:
wks.write(startrow + cell.row,
startcol + cell.col,
val, style)
@classmethod
def _style_to_xlwt(cls, item, firstlevel=True, field_sep=',',
line_sep=';'):
"""helper which recursively generate an xlwt easy style string
for example:
hstyle = {"font": {"bold": True},
"border": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"align": {"horiz": "center"}}
will be converted to
font: bold on; \
border: top thin, right thin, bottom thin, left thin; \
align: horiz center;
"""
if hasattr(item, 'items'):
if firstlevel:
it = ["{key}: {val}"
.format(key=key, val=cls._style_to_xlwt(value, False))
for key, value in item.items()]
out = "{sep} ".format(sep=(line_sep).join(it))
return out
else:
it = ["{key} {val}"
.format(key=key, val=cls._style_to_xlwt(value, False))
for key, value in item.items()]
out = "{sep} ".format(sep=(field_sep).join(it))
return out
else:
item = "{item}".format(item=item)
item = item.replace("True", "on")
item = item.replace("False", "off")
return item
@classmethod
def _convert_to_style(cls, style_dict, num_format_str=None):
"""
converts a style_dict to an xlwt style object
Parameters
----------
style_dict: style dictionary to convert
num_format_str: optional number format string
"""
import xlwt
if style_dict:
xlwt_stylestr = cls._style_to_xlwt(style_dict)
style = xlwt.easyxf(xlwt_stylestr, field_sep=',', line_sep=';')
else:
style = xlwt.XFStyle()
if num_format_str is not None:
style.num_format_str = num_format_str
return style
register_writer(_XlwtWriter)
class _XlsxStyler(object):
# Map from openpyxl-oriented styles to flatter xlsxwriter representation
# Ordering necessary for both determinism and because some are keyed by
# prefixes of others.
STYLE_MAPPING = {
'font': [
(('name',), 'font_name'),
(('sz',), 'font_size'),
(('size',), 'font_size'),
(('color', 'rgb',), 'font_color'),
(('color',), 'font_color'),
(('b',), 'bold'),
(('bold',), 'bold'),
(('i',), 'italic'),
(('italic',), 'italic'),
(('u',), 'underline'),
(('underline',), 'underline'),
(('strike',), 'font_strikeout'),
(('vertAlign',), 'font_script'),
(('vertalign',), 'font_script'),
],
'number_format': [
(('format_code',), 'num_format'),
((), 'num_format',),
],
'protection': [
(('locked',), 'locked'),
(('hidden',), 'hidden'),
],
'alignment': [
(('horizontal',), 'align'),
(('vertical',), 'valign'),
(('text_rotation',), 'rotation'),
(('wrap_text',), 'text_wrap'),
(('indent',), 'indent'),
(('shrink_to_fit',), 'shrink'),
],
'fill': [
(('patternType',), 'pattern'),
(('patterntype',), 'pattern'),
(('fill_type',), 'pattern'),
(('start_color', 'rgb',), 'fg_color'),
(('fgColor', 'rgb',), 'fg_color'),
(('fgcolor', 'rgb',), 'fg_color'),
(('start_color',), 'fg_color'),
(('fgColor',), 'fg_color'),
(('fgcolor',), 'fg_color'),
(('end_color', 'rgb',), 'bg_color'),
(('bgColor', 'rgb',), 'bg_color'),
(('bgcolor', 'rgb',), 'bg_color'),
(('end_color',), 'bg_color'),
(('bgColor',), 'bg_color'),
(('bgcolor',), 'bg_color'),
],
'border': [
(('color', 'rgb',), 'border_color'),
(('color',), 'border_color'),
(('style',), 'border'),
(('top', 'color', 'rgb',), 'top_color'),
(('top', 'color',), 'top_color'),
(('top', 'style',), 'top'),
(('top',), 'top'),
(('right', 'color', 'rgb',), 'right_color'),
(('right', 'color',), 'right_color'),
(('right', 'style',), 'right'),
(('right',), 'right'),
(('bottom', 'color', 'rgb',), 'bottom_color'),
(('bottom', 'color',), 'bottom_color'),
(('bottom', 'style',), 'bottom'),
(('bottom',), 'bottom'),
(('left', 'color', 'rgb',), 'left_color'),
(('left', 'color',), 'left_color'),
(('left', 'style',), 'left'),
(('left',), 'left'),
],
}
@classmethod
def convert(cls, style_dict, num_format_str=None):
"""
converts a style_dict to an xlsxwriter format dict
Parameters
----------
style_dict: style dictionary to convert
num_format_str: optional number format string
"""
# Create a XlsxWriter format object.
props = {}
if num_format_str is not None:
props['num_format'] = num_format_str
if style_dict is None:
return props
if 'borders' in style_dict:
style_dict = style_dict.copy()
style_dict['border'] = style_dict.pop('borders')
for style_group_key, style_group in style_dict.items():
for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):
# src is a sequence of keys into a nested dict
# dst is a flat key
if dst in props:
continue
v = style_group
for k in src:
try:
v = v[k]
except (KeyError, TypeError):
break
else:
props[dst] = v
if isinstance(props.get('pattern'), string_types):
# TODO: support other fill patterns
props['pattern'] = 0 if props['pattern'] == 'none' else 1
for k in ['border', 'top', 'right', 'bottom', 'left']:
if isinstance(props.get(k), string_types):
try:
props[k] = ['none', 'thin', 'medium', 'dashed', 'dotted',
'thick', 'double', 'hair', 'mediumDashed',
'dashDot', 'mediumDashDot', 'dashDotDot',
'mediumDashDotDot',
'slantDashDot'].index(props[k])
except ValueError:
props[k] = 2
if isinstance(props.get('font_script'), string_types):
props['font_script'] = ['baseline', 'superscript',
'subscript'].index(props['font_script'])
if isinstance(props.get('underline'), string_types):
props['underline'] = {'none': 0, 'single': 1, 'double': 2,
'singleAccounting': 33,
'doubleAccounting': 34}[props['underline']]
return props
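    # Editor's sketch (hypothetical input, not part of the original source): an
    # openpyxl-oriented dict such as
    #   {'font': {'bold': True, 'color': {'rgb': 'FF0000'}}, 'borders': {'top': {'style': 'thin'}}}
    # is flattened by the mapping above into an xlsxwriter format dict like
    #   {'bold': True, 'font_color': 'FF0000', 'top': 1}
    # ('thin' is looked up in the border-style list above and becomes index 1).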
class _XlsxWriter(ExcelWriter):
engine = 'xlsxwriter'
supported_extensions = ('.xlsx',)
def __init__(self, path, engine=None,
date_format=None, datetime_format=None, mode='w',
**engine_kwargs):
# Use the xlsxwriter module as the Excel writer.
import xlsxwriter
if mode == 'a':
raise ValueError('Append mode is not supported with xlsxwriter!')
super(_XlsxWriter, self).__init__(path, engine=engine,
date_format=date_format,
datetime_format=datetime_format,
mode=mode,
**engine_kwargs)
self.book = xlsxwriter.Workbook(path, **engine_kwargs)
def save(self):
"""
Save workbook to disk.
"""
return self.book.close()
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
freeze_panes=None):
# Write the frame cells using xlsxwriter.
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.add_worksheet(sheet_name)
self.sheets[sheet_name] = wks
style_dict = {'null': None}
if _validate_freeze_panes(freeze_panes):
wks.freeze_panes(*(freeze_panes))
for cell in cells:
val, fmt = self._value_with_fmt(cell.val)
stylekey = json.dumps(cell.style)
if fmt:
stylekey += fmt
if stylekey in style_dict:
style = style_dict[stylekey]
else:
style = self.book.add_format(
_XlsxStyler.convert(cell.style, fmt))
style_dict[stylekey] = style
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_range(startrow + cell.row,
startcol + cell.col,
startrow + cell.mergestart,
startcol + cell.mergeend,
cell.val, style)
else:
wks.write(startrow + cell.row,
startcol + cell.col,
val, style)
register_writer(_XlsxWriter)
| bsd-3-clause |
joernhees/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 19 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
dataset that explain the most shared variance between the two datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent variables:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
QudevETH/PycQED_py3 | pycqed/analysis/utils/__init__.py | 2 | 7673 | import numpy as np
from fit_toolbox import fit
from fit_toolbox import functions as fn
from fit_toolbox import guess_initial_values as giv
import matplotlib.pyplot as plt
from time import time
from qt import Data
import os
import cmath
def store_scripts(filepath):
from shutil import copytree
import os
if not os.path.exists(os.path.join(filepath, 'scripts')):
#fol,fil = os.path.split(fp)
copytree('D:\\qtlab\\scripts', os.path.join(filepath, 'scripts'))
else:
print('scripts already stored')
# resonator
def find_datestr(tstr):
    lookfor = tstr
    print(lookfor)
    datestr = None
    for root, dirs, files in os.walk('D:\qtlab\data'):
        # print "searching", root
        for dir in dirs:
            if lookfor in dir:
                datestr = root
                print("found: %s at %s" % (lookfor, datestr))
                break
        if datestr is not None:
            # stop walking once the timestamp has been found
            break
    if datestr is None:
        raise ValueError('could not find %s' % lookfor)
    return datestr[-8:]
def littleEndian(data, bytes=4):
return [(data >> ofs) & 0xFF for ofs in (0, 8, 16, 24)[:bytes]]
def fit_S21_resonance_complex(*args, **kw):
'''
    input:
        x, y
    returns a dict with:
        'fitpars' = (A, f0, Q, Qe, df, theta), 'fitparserr', 'fit',
        'fitparsobj' and 'parsinit'
'''
# print 'no slope'
if type(args[0]) == Data:
dat = args[0]
xy = dat.get_data()
x = xy[:, 0]
y = xy[:, 1]
else:
x = args[0]
y = args[1]
silent = kw.pop('silent', True)
p0i = param_estimator_complex(x, y)
if not silent:
print('init guess: ', p0i)
pars = S21p0i2paramsc(x, p0i)
fr = fit.fitxy(pars, y, fn.disp_hanger_S21_complex_sloped, ret_fit=True)
if not silent:
fit.print_fitres(pars)
fitpars = params2tuple(pars)
fitparserr = paramserr2tuple(pars)
output = {'fitpars': fitpars, 'fitparserr': fitparserr,
'fit': fr[1], 'fitparsobj': pars, 'parsinit': p0i}
return output
# return pfit,errp,fr[1],fr[0],pars,p0i
def param_estimator_complex(x, y):
# Temps to make param estimations
ya = np.abs(y[:])
phi_0 = cmath.phase(y[0])
phi_v = 0
s21min = np.abs(min(ya)/max(ya))
y_rotated = y*np.exp(-1.j*(phi_v*x+phi_0-phi_v*x[0]))
y_min = np.argmin(y_rotated.imag)
y_max = np.argmax(y_rotated.imag)
# Parameters
A = (ya[0]+ya[-1])/2.
f0 = x[ya[:].tolist().index(min(ya[:]))]
Q = f0/(abs(x[y_min]-x[y_max]))
Qca = np.abs(Q/np.abs(1-s21min))
#-440.
phi_0 = phi_0
p0i = (A, f0, Q, Qca, phi_0, phi_v)
return p0i
def S21p0i2paramsc(f, p0i, **kw):
pars = fit.Parameters()
(A, f0, Q, Qe, phi_0, phi_v) = p0i
pars.add('A', value=A)
pars.add('f', value=f, vary=False)
pars.add('f0', value=f0)
pars.add('Q', value=Q)
pars.add('Qe', value=Qe)
pars.add('Qi', expr='1./(1./Q-1./Qe*cos(theta))', vary=False)
pars.add('Qc', expr='Qe/cos(theta)', vary=False)
pars.add('df', value=0., vary=True)
pars.add('theta', value=0.)
pars.add('phi_v', value=phi_v)
pars.add('phi_0', value=phi_0) # ,min=-np.pi,max=np.pi)
return pars
def fit_S21_resonance(*args, **kw):
'''
    input:
        x, y
    returns a dict with:
        'fitpars' = (A, f0, Q, Qe, df, theta), 'parserr', 'fit',
        'fitparsobj' and 'parsinit'
'''
# print 'no slope'
if type(args[0]) == Data:
dat = args[0]
xy = dat.get_data()
x = xy[:, 0]
y = xy[:, 1]
else:
x = args[0]
y = np.abs(args[1])
p0i = param_estimator(x, y)
silent = kw.pop('silent', True)
if not silent:
print('init guess: ', p0i)
pars = S21p0i2params(x, p0i)
# print 'init_guess', p0i
fr = fit.fitxy(pars, y, fn.S21_resonance_sloped, ret_fit=True)
if not silent:
fit.print_fitres(pars)
fitpars = params2tuple(pars)
fitparserr = paramserr2tuple(pars)
output = {'fitpars': fitpars, 'parserr': fitparserr,
'fit': fr[1], 'fitparsobj': pars, 'parsinit': p0i}
return output
# return pfit,errp,fr[1],fr[0],pars,p0i
def param_estimator(x, y):
# Temps to make param estimations
ya = np.abs(y[:])
s21min = np.abs(min(ya)/max(ya))
y_min = ya.tolist().index(min(ya[:]))
y_max = ya.tolist().index(max(ya[:]))
# Parameters
slope = 0.
theta = 0.
A = (ya[0]+ya[-1])/2.
f0 = x[y_min]
Q = f0/(abs(x[y_min]-x[y_max]))
Qca = np.abs(Q/np.abs(1-s21min))
p0i = (A, f0, Q, Qca, slope, theta)
return p0i
def S21p0i2params(f, p0i):
pars = fit.Parameters()
(A, f0, Q, Qe, dx, theta) = p0i
pars.add('A', value=A, min=0.)
pars.add('f', value=f, vary=False)
pars.add('f0', value=f0, min=0.)
pars.add('Q', value=Q, min=0.)
pars.add('Qe', value=Qe, min=0.)
pars.add('Qi', expr='1./(1./Q-1./Qe*cos(theta))', vary=False)
pars.add('Qc', expr='Qe/cos(theta)', vary=False)
pars.add('df', value=0.)
pars.add('theta', value=0., min=-np.pi/2, max=np.pi/2)
return pars
def params2tuple(pars, *args):
'''
p = (A,f0,Q,Qe,df,theta)
'''
if len(args) == 1:
keys = args[0]
else:
keys = ['A', 'f0', 'Q', 'Qe', 'df', 'theta']
p = ()
for key in keys:
p += (pars[key].value,)
return p
def paramserr2tuple(pars, *args):
'''
p = (A,f0,Q,Qe,df,theta)
'''
if len(args) == 1:
keys = args[0]
else:
keys = ['A', 'f0', 'Q', 'Qe', 'df', 'theta']
errp = ()
for key in keys:
errp += (pars[key].stderr,)
return errp
def fit_lorentzian(x, y, plot=True):
y0 = (y[0]+y[-1])/2.
peak = np.abs((np.max(y)-y0)) > np.abs((np.min(y)-y0))
print('peak = ', peak, y0, np.max(y), np.min(y))
if peak:
A = np.max(y)-y0
f0 = x[y[:].tolist().index(max(y[:]))]
find_y2 = (y-y0) > A/2
else:
A = np.min(y)-y0
f0 = x[y[:].tolist().index(min(y[:]))]
find_y2 = (y-y0) < A/2
ind_df = find_y2.tolist().index(True)
df = 2*np.abs(x[ind_df] - f0)
pars = fit.Parameters()
pars.add('f', value=x, vary=False)
pars.add('A', value=A)
pars.add('f0', value=f0)
pars.add('df', value=df)
pars.add('y0', value=y0)
fit.print_fitres(pars)
fit.fitxy(pars, y, fn.Lorentzian)
fit.print_fitres(pars)
yfit = fn.Lorentzian(pars)
if plot:
plt.plot(x, yfit, label='fit: f0 = %.4f, df = %.1f' %
(pars['f0'].value, 1e3*(pars['df'].value)))
plt.legend()
return pars, fn.Lorentzian(pars)
# Conversions
pi, log = np.pi, np.log
e_ = 1.6e-19
h = 6.62e-34
phi_0 = 2.e-15
hbar = h/2/np.pi
k_B = 1.38e-23
eps_0 = 8.854e-12
mu_0 = 4*np.pi*1e-7
def VtodBm(V):
'''
Converts voltage in 50 Ohm to dBm
'''
P = (V**2)/50/np.sqrt(2.)
return 10*np.log(P/1e-3)/np.log(10)
def photon_number(eps, kappa):
'''
    steady state photon number from drive strength eps and kappa
'''
n_bar = (2*eps/kappa)**2 # kappa=2pi * decay rate
return n_bar
def dBm2nbar(P, f0, Ql, Qi):
'''
    Calculates n_bar for hangers from the power in the feedline
'''
Pin = 10**(P/10.)*1e-3
Pres = (1-(1.*Ql/Qi)**2)*Pin
print(Pres)
n_in = Pres/(h*f0)/(f0/Ql)
return n_in
def C_to_Ec(C):
'''
returns Ec in GHz from the capacitance
'''
return e_**2/2/C/h/1e9
def Ic_to_Ej(Ic):
'''
returns Ej in GHz from Ic
'''
return Ic*phi_0/2/pi/h/1e9
def EjEc_to_f(Ej, Ec):
'''
Calculates transmon f_ge from Ec and Ej
'''
return np.sqrt(8*Ej*Ec)
def IctoLj(Ic):
return phi_0/2/np.pi/Ic
def IcC_to_f(Ic, C):
'''
Calculates transmon f_ge from Ic and C
'''
Ec = C_to_Ec(C)
Ej = Ic_to_Ej(Ic)
return EjEc_to_f(Ej, Ec)
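# Editor's note -- rough illustrative numbers (using the approximate constants defined
# above, so only order-of-magnitude accurate; not part of the original source):
#   Ec = C_to_Ec(80e-15)      # ~0.24 GHz for an 80 fF capacitance
#   Ej = Ic_to_Ej(30e-9)      # ~14 GHz for a 30 nA junction
#   f_ge = EjEc_to_f(Ej, Ec)  # ~5.3 GHz, equivalent to IcC_to_f(30e-9, 80e-15)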
def allclose():
    while plt.get_fignums():
plt.close()
| mit |
INGEOTEC/microTC | microtc/regscorewrapper.py | 1 | 4814 | # Copyright 2016 Mario Graff (https://github.com/mgraffg)
# Copyright 2016 Eric S. Tellez <[email protected]>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from time import time
from sklearn.metrics import r2_score
from scipy.stats import pearsonr, spearmanr
from sklearn import preprocessing
from sklearn import model_selection
from microtc.textmodel import TextModel
from microtc.wrappers import RegressorWrapper
class RegressionScoreSampleWrapper(object):
valid_scores = ['r2', 'pearsonr', 'spearmanr']
def __init__(self, X, y, Xstatic=[], ystatic=[], ratio=0.8, test_ratio=None, score='r2', classifier=RegressorWrapper, random_state=None):
assert ratio < 1, "ratio {0} is invalid, valid values are 0 < ratio < 1".format(ratio)
self.score = score
self.le = preprocessing.LabelEncoder().fit(y)
self.create_classifier = classifier
if test_ratio is None:
test_ratio = 1.0 - ratio
I = list(range(len(y)))
np.random.shuffle(I)
s = int(np.ceil(len(y) * ratio))
s_end = int(np.ceil(len(y) * test_ratio))
y = self.le.transform(y)
train, test = I[:s], I[s:s+s_end]
self.train_corpus = [X[i] for i in train]
self.train_corpus.extend(Xstatic)
if len(ystatic) > 0:
ystatic = self.le.transform(ystatic)
self.train_y = np.hstack((y[train], ystatic))
else:
self.train_y = y[train]
self.test_corpus = [X[i] for i in test]
self.test_y = y[test]
def __call__(self, conf_code):
conf, code = conf_code
st = time()
textmodel = TextModel(self.train_corpus, **conf)
train_X = [textmodel[doc] for doc in self.train_corpus]
c = self.create_classifier()
# c.fit(train_X, self.train_y)
try:
c.fit(train_X, self.train_y)
except ValueError:
conf["_error"] = "this configuration produces an empty matrix"
conf["_score"] = 0.0
return conf
test_X = [textmodel[doc] for doc in self.test_corpus]
pred_y = c.predict(test_X)
self.compute_score(conf, pred_y)
conf['_time'] = (time() - st)
return conf
def compute_score(self, conf, hy):
conf['_r2'] = r2_score(self.test_y, hy)
conf['_spearmanr'] = spearmanr(self.test_y, hy)[0]
conf['_pearsonr'] = pearsonr(self.test_y, hy)[0]
conf['_score'] = conf['_' + self.score]
# print(conf)
class RegressionScoreKFoldWrapper(RegressionScoreSampleWrapper):
def __init__(self, X, y, Xstatic=[], ystatic=[], nfolds=5, score='r2', classifier=RegressorWrapper, random_state=None):
self.nfolds = nfolds
self.score = score
# self.X = np.array(X)
self.X = X
self.Xstatic = Xstatic
self.le = preprocessing.LabelEncoder().fit(y)
self.y = self.le.transform(y)
if len(ystatic) > 0:
self.ystatic = self.le.transform(ystatic)
else:
self.ystatic = []
self.test_y = self.y
self.create_classifier = classifier
self.kfolds = model_selection.KFold(n_splits=nfolds, shuffle=True, random_state=random_state)
def __call__(self, conf_code):
conf, code = conf_code
st = time()
predY = np.zeros(len(self.y))
# X = np.array(self.X)
for train, test in self.kfolds.split(self.X):
# A = X[train]
A = [self.X[i] for i in train]
if len(self.Xstatic) > 0:
A.extend(self.Xstatic)
trainY = self.y[train]
if len(self.ystatic) > 0:
trainY = np.hstack((trainY, self.ystatic))
textmodel = TextModel(A, **conf)
trainX = [textmodel[x] for x in A]
c = self.create_classifier()
try:
c.fit(trainX, trainY)
except ValueError:
conf["_error"] = "this configuration produces an empty matrix"
conf["_score"] = 0.0
return conf
testX = [textmodel[self.X[i]] for i in test]
predY[test] = c.predict(testX)
self.compute_score(conf, predY)
conf['_time'] = (time() - st) / self.nfolds
return conf
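    # Editor's sketch of intended use (names are illustrative, not part of the original
    # source): given text samples X_text and numeric targets y,
    #   scorer = RegressionScoreKFoldWrapper(X_text, y, nfolds=5, score='pearsonr')
    #   conf = scorer((textmodel_kwargs, code))
    # returns the same configuration dict updated with '_r2', '_spearmanr', '_pearsonr',
    # '_score' and '_time'.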
| apache-2.0 |
trungnt13/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 222 | 3055 | # Author: Christian Osendorfer <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
# Test FactorAnalysis ability to recover the data covariance structure
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 20, 5, 3
# Some random settings for the generative model
W = rng.randn(n_components, n_features)
# latent variable of dim 3, 20 of it
h = rng.randn(n_samples, n_components)
# using gamma to model different noise variance
# per component
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
# generate observations
# wlog, mean is 0
X = np.dot(h, W) + noise
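    # (editor's note) the implied factor-analysis model is X = h.dot(W) + noise with
    # isotropic latent h, so the model covariance recovered below should approximate
    # W.T.dot(W) + diag(noise_variance).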
assert_raises(ValueError, FactorAnalysis, svd_method='foo')
fa_fail = FactorAnalysis()
fa_fail.svd_method = 'foo'
assert_raises(ValueError, fa_fail.fit, X)
fas = []
for method in ['randomized', 'lapack']:
fa = FactorAnalysis(n_components=n_components, svd_method=method)
fa.fit(X)
fas.append(fa)
X_t = fa.transform(X)
assert_equal(X_t.shape, (n_samples, n_components))
assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
diff = np.all(np.diff(fa.loglike_))
        assert_greater(diff, 0., 'Log likelihood did not increase')
# Sample Covariance
scov = np.cov(X, rowvar=0., bias=1.)
# Model Covariance
mcov = fa.get_covariance()
diff = np.sum(np.abs(scov - mcov)) / W.size
assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
fa = FactorAnalysis(n_components=n_components,
noise_variance_init=np.ones(n_features))
assert_raises(ValueError, fa.fit, X[:, :2])
f = lambda x, y: np.abs(getattr(x, y)) # sign will not be equal
fa1, fa2 = fas
for attr in ['loglike_', 'components_', 'noise_variance_']:
assert_almost_equal(f(fa1, attr), f(fa2, attr))
fa1.max_iter = 1
fa1.verbose = True
assert_warns(ConvergenceWarning, fa1.fit, X)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
fa.n_components = n_components
fa.fit(X)
cov = fa.get_covariance()
precision = fa.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
| bsd-3-clause |
pcmagic/stokes_flow | src/stokes_flow.py | 1 | 220301 | # coding=utf-8
"""
functions for solving Stokes flow using the regularised Stokeslets method (and its improved variants).
Zhang Ji, 20160409
"""
# import sys
# sys.path = ['..'] + sys.path
# from memory_profiler import profile
# from math import sin, cos
# import warnings
from pyvtk import *
import os
import matplotlib.pyplot as plt
import copy
import numpy as np
from scipy.io import savemat, loadmat
from evtk.hl import pointsToVTK, gridToVTK
from petsc4py import PETSc
import pickle
from time import time
from tqdm import tqdm
from tqdm.notebook import tqdm as tqdm_notebook
from src.support_class import *
from src.geo import *
from src.ref_solution import *
from src.StokesFlowMethod import light_stokeslets_matrix_3d
import itertools
class StokesFlowProblem:
def _init_kwargs(self, **kwargs):
pass
def __init__(self, **kwargs):
self._obj_list = uniqueList() # contain objects
        self._kwargs = kwargs  # kwargs associated with the solving method
self._init_kwargs(**kwargs)
self._force = np.zeros([0]) # force information
self._force_petsc = PETSc.Vec().create(comm=PETSc.COMM_WORLD)
self._velocity_petsc = PETSc.Vec().create(comm=PETSc.COMM_WORLD) # velocity information
self._re_velocity = np.zeros([0]) # resolved velocity information
self._n_fnode = 0
self._n_unode = 0
self._f_pkg = PETSc.DMComposite().create(comm=PETSc.COMM_WORLD)
self._u_pkg = PETSc.DMComposite().create(comm=PETSc.COMM_WORLD)
self._M_petsc = PETSc.Mat().create(comm=PETSc.COMM_WORLD) # M matrix
        self._M_destroyed = False  # whether Mat().destroy() has been called.
self._finish_solve = False
        self._pick_M = False  # whether to save the M matrix and F vector in binary files.
self._n_unknown = 3 # number of unknowns at each node.
self._pick_filename = '..' # prefix filename of pick files. i.e. filename_F.bin, filename_M.bin, filename_pick.bin.
self._residualNorm = 0.
self._convergenceHistory = np.zeros([0])
from src import StokesFlowMethod
self._method_dict = {
'rs': StokesFlowMethod.regularized_stokeslets_matrix_3d_petsc,
'rs_plane': StokesFlowMethod.regularized_stokeslets_plane_matrix_3d_petsc,
'tp_rs': StokesFlowMethod.two_para_regularized_stokeslets_matrix_3d,
'lg_rs': StokesFlowMethod.legendre_regularized_stokeslets_matrix_3d,
'pf': StokesFlowMethod.point_force_matrix_3d_petsc,
'pf_dualPotential': StokesFlowMethod.dual_potential_matrix_3d_petsc,
'rs_stokesletsInPipe': StokesFlowMethod.regularized_stokeslets_matrix_3d_petsc,
'pf_stokesletsInPipe': StokesFlowMethod.point_force_matrix_3d_petsc,
'pf_stokesletsInPipeforcefree': StokesFlowMethod.point_force_matrix_3d_petsc,
'pf_stokesletsTwoPlane': StokesFlowMethod.two_plane_matrix_3d_petsc,
'pf_infhelix': StokesFlowMethod.pf_infhelix_3d_petsc,
'pf_stokesletsRingInPipe': StokesFlowMethod.point_force_matrix_3d_petsc,
'pf_stokesletsRingInPipeProblemSymz': StokesFlowMethod.point_force_matrix_3d_petsc,
'pf_stokesletsRing': StokesFlowMethod.point_force_ring_3d_petsc,
'pf_selfRepeat': StokesFlowMethod.self_repeat_3d_petsc,
'pf_selfRotate': StokesFlowMethod.self_rotate_3d_petsc,
'rs_selfRotate': StokesFlowMethod.self_rotate_3d_petsc,
'lg_rs_selfRotate': StokesFlowMethod.self_rotate_3d_petsc,
}
self._check_args_dict = {
'rs': StokesFlowMethod.check_regularized_stokeslets_matrix_3d,
'rs_plane': StokesFlowMethod.check_regularized_stokeslets_plane_matrix_3d,
'tp_rs': StokesFlowMethod.check_two_para_regularized_stokeslets_matrix_3d,
'lg_rs': StokesFlowMethod.check_legendre_regularized_stokeslets_matrix_3d,
'pf': StokesFlowMethod.check_point_force_matrix_3d_petsc,
'pf_dualPotential': StokesFlowMethod.check_dual_potential_matrix_3d_petsc,
'rs_stokesletsInPipe': StokesFlowMethod.check_regularized_stokeslets_matrix_3d,
'pf_stokesletsInPipe': StokesFlowMethod.check_point_force_matrix_3d_petsc,
'pf_stokesletsInPipeforcefree': StokesFlowMethod.check_point_force_matrix_3d_petsc,
'pf_stokesletsTwoPlane': StokesFlowMethod.check_two_plane_matrix_3d_petsc,
'pf_infhelix': StokesFlowMethod.check_pf_infhelix_3d_petsc,
'pf_stokesletsRingInPipe': StokesFlowMethod.check_point_force_matrix_3d_petsc,
'pf_stokesletsRingInPipeProblemSymz': StokesFlowMethod.check_point_force_matrix_3d_petsc,
'pf_stokesletsRing': StokesFlowMethod.check_point_force_matrix_3d_petsc,
'pf_selfRepeat': StokesFlowMethod.check_point_force_matrix_3d_petsc,
'pf_selfRotate': StokesFlowMethod.check_self_rotate_3d_petsc,
'rs_selfRotate': StokesFlowMethod.check_self_rotate_3d_petsc,
'lg_rs_selfRotate': StokesFlowMethod.check_self_rotate_3d_petsc,
}
def _check_add_obj(self, obj):
pass
def add_obj(self, obj):
"""
Add a new object to the problem.
:type obj: StokesFlowObj
:param obj: added object
:return: none.
"""
self._check_add_obj(obj)
self._obj_list.append(obj)
obj.set_index(self.get_n_obj())
obj.set_problem(self)
obj.set_matrix_method(self.get_matrix_method())
self._f_pkg.addDM(obj.get_f_geo().get_dmda())
self._u_pkg.addDM(obj.get_u_geo().get_dmda())
self._n_fnode += obj.get_n_f_node()
self._n_unode += obj.get_n_u_node()
return True
def do_solve_process(self, obj_list, pick_M=False):
obj_tube = list(tube_flatten((obj_list,)))
fileHandle = self._kwargs['fileHandle']
for obj in obj_tube:
self.add_obj(obj)
if self._kwargs['pickProblem']:
self.pickmyself(fileHandle, ifcheck=True)
self.print_info()
self.create_matrix()
residualNorm = self.solve()
# # dbg
# self.saveM_mat(fileHandle)
if self._kwargs['pickProblem']:
self.pickmyself(fileHandle, pick_M=pick_M)
return residualNorm
def __repr__(self):
return type(self).__name__
def __str__(self):
return self.get_name()
def _create_matrix_obj(self, obj1, m_petsc, INDEX='', *args):
# obj1 contain velocity information, obj2 contain force information
kwargs = self.get_kwargs()
n_obj = len(self.get_all_obj_list())
for i0, obj2 in enumerate(self.get_all_obj_list()):
kwargs['INDEX'] = ' %d/%d, ' % (i0 + 1, n_obj) + INDEX
self._check_args_dict[obj2.get_matrix_method()](**kwargs)
self._method_dict[obj2.get_matrix_method()](obj1, obj2, m_petsc, **kwargs)
m_petsc.assemble()
return True
def updata_matrix(self, obj1, obj2, INDEX=''):
# obj1 contain velocity information, obj2 contain force information
kwargs = self._kwargs
kwargs['INDEX'] = INDEX
self._method_dict[obj2.get_matrix_method()](obj1, obj2, self._M_petsc, **kwargs)
self._M_petsc.assemble()
return True
def _create_U(self):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
velocity = self._u_pkg.createGlobalVector()
velocity.zeroEntries()
for i0, obj0 in enumerate(self.get_obj_list()):
u0 = obj0.get_velocity()
_, u_glbIdx_all = obj0.get_u_geo().get_glbIdx()
if rank == 0:
velocity[u_glbIdx_all] = u0[:]
velocity.assemble()
self._velocity_petsc = velocity
return True
def _set_glbIdx(self):
# global index
f_isglb = self._f_pkg.getGlobalISs()
u_isglb = self._u_pkg.getGlobalISs()
# for i0, obj0 in enumerate(self.get_obj_list()):
# obj0.get_f_geo().set_glbIdx(f_isglb[i0].getIndices())
# obj0.get_u_geo().set_glbIdx(u_isglb[i0].getIndices())
for obj0, t_f_isglb, t_u_isglb in zip(self.get_all_obj_list(), f_isglb, u_isglb):
obj0.get_f_geo().set_glbIdx(t_f_isglb.getIndices())
obj0.get_u_geo().set_glbIdx(t_u_isglb.getIndices())
return True
def create_F_U(self):
# create f and u DMComposite
self._f_pkg.setFromOptions()
self._f_pkg.setUp()
self._u_pkg.setFromOptions()
self._u_pkg.setUp()
# glbIdx
self._set_glbIdx()
# velocity
self._create_U()
# force
self._force_petsc = self._f_pkg.createGlobalVector()
self._force_petsc.set(0)
return True
def create_empty_M(self):
kwargs = self._kwargs
solve_method = kwargs['solve_method']
err_msg = 'at least one object is necessary. '
assert len(self._obj_list) > 0, err_msg
        err_msg = 'unequal force and velocity degrees of freedom, only the lsqr method is accepted. '
for obj1 in self.get_all_obj_list():
assert obj1.get_n_force() == obj1.get_n_velocity() or solve_method == 'lsqr', err_msg
# create matrix
self._M_petsc.setSizes((self._velocity_petsc.getSizes(), self._force_petsc.getSizes()))
self._M_petsc.setType('dense')
self._M_petsc.setFromOptions()
self._M_petsc.setUp()
return self._M_petsc
def create_matrix(self):
t0 = time()
self.create_F_U()
if not self._M_petsc.isAssembled():
self.create_empty_M()
self._M_destroyed = False
n_obj = len(self.get_all_obj_list())
for i0, obj1 in enumerate(self.get_all_obj_list()):
INDEX = ' %d/%d' % (i0 + 1, n_obj)
self._create_matrix_obj(obj1, self._M_petsc, INDEX)
# self._M_petsc.view()
t1 = time()
PETSc.Sys.Print(' %s: create matrix use: %fs' % (str(self), (t1 - t0)))
return True
def set_matrix(self, m_petsc):
self.create_F_U()
self._M_petsc = m_petsc
return True
def create_part_matrix(self, uobj_old, fobj_old, uobj_new, fobj_new, M):
        # Assuming the problem has n+1 objects named obj_0, obj_1, ..., obj_n. After creating the M matrix of the
        # problem, we may need a mini M matrix associated with a subset of the objects {obj_k, k in [0, n]}. This
        # method gets the values of the mini M matrix from the main M matrix of the problem, to save assembly time.
"""
:type uobj_old: StokesFlowObj
:type uobj_new: StokesFlowObj
:type fobj_old: StokesFlowObj
:type fobj_new: StokesFlowObj
:param uobj_old:
:param fobj_old:
:param uobj_new:
:param fobj_new:
:param M:
:return:
"""
        err_msg = 'uobj_old and uobj_new are not the same. '
        assert (uobj_old.get_u_geo().get_nodes() == uobj_new.get_u_geo().get_nodes()).all(), err_msg
        err_msg = 'fobj_old and fobj_new are not the same. '
assert (fobj_old.get_f_geo().get_nodes() == fobj_new.get_f_geo().get_nodes()).all(), err_msg
u_glbIdx_old, u_glbIdx_all_old = uobj_old.get_u_geo().get_glbIdx()
f_glbIdx_old, f_glbIdx_all_old = fobj_old.get_f_geo().get_glbIdx()
_, u_glbIdx_all_new = uobj_new.get_u_geo().get_glbIdx()
_, f_glbIdx_all_new = fobj_new.get_f_geo().get_glbIdx()
t_Idx = np.searchsorted(u_glbIdx_all_old, u_glbIdx_old)
u_glbIdx_new = u_glbIdx_all_new[t_Idx]
temp0 = self._M_petsc.getValues(u_glbIdx_old, f_glbIdx_all_old)
M.setValues(u_glbIdx_new, f_glbIdx_all_new, temp0, addv=False)
M.assemble()
return True
def create_obj_matrix(self, objf: 'StokesFlowObj', # force object
obju: 'StokesFlowObj', # velocity object
copy_obj=True,
**kwargs):
if copy_obj:
obj1 = objf.copy()
obj2 = obju.copy()
else:
obj1 = objf
obj2 = obju
t_f_pkg = PETSc.DMComposite().create(comm=PETSc.COMM_WORLD)
t_u_pkg = PETSc.DMComposite().create(comm=PETSc.COMM_WORLD)
t_f_pkg.addDM(obj1.get_f_geo().get_dmda())
t_u_pkg.addDM(obj2.get_u_geo().get_dmda())
t_f_pkg.setFromOptions()
t_f_pkg.setUp()
t_u_pkg.setFromOptions()
t_u_pkg.setUp()
f_isglb = t_f_pkg.getGlobalISs()
u_isglb = t_u_pkg.getGlobalISs()
obj1.get_f_geo().set_glbIdx(f_isglb[0].getIndices())
obj2.get_u_geo().set_glbIdx(u_isglb[0].getIndices())
t_velocity = t_u_pkg.createGlobalVector()
t_force = t_f_pkg.createGlobalVector()
m_petsc = PETSc.Mat().create(comm=PETSc.COMM_WORLD)
m_petsc.setSizes((t_velocity.getSizes(), t_force.getSizes()))
m_petsc.setType('dense')
m_petsc.setFromOptions()
m_petsc.setUp()
self._method_dict[kwargs['matrix_method']](obj2, obj1, m_petsc, **kwargs)
m_petsc.assemble()
t_velocity.destroy()
t_force.destroy()
return m_petsc
def solve(self, ini_guess=None):
t0 = time()
kwargs = self._kwargs
solve_method = kwargs['solve_method']
precondition_method = kwargs['precondition_method']
if ini_guess is not None:
            err_msg = 'size of initial guess for force vector must equal the number of M matrix rows. '
assert self._force_petsc.getSize() == ini_guess.getSize(), err_msg
self._force_petsc[:] = ini_guess[:]
ksp = PETSc.KSP()
ksp.create(comm=PETSc.COMM_WORLD)
ksp.setType(solve_method)
ksp.getPC().setType(precondition_method)
ksp.setOperators(self._M_petsc)
OptDB = PETSc.Options()
ksp.setFromOptions()
# Todo: dbg_GMRESRestart
if not OptDB.getBool('debug', False):
tolerance = ksp.getTolerances()
ksp.setGMRESRestart(tolerance[-1])
ksp.setInitialGuessNonzero(True)
ksp.setUp()
self._solve_force(ksp)
self._residualNorm = self._resolve_velocity(ksp)
ksp.destroy()
t1 = time()
PETSc.Sys.Print(' %s: solve matrix equation use: %fs, with residual norm %e' %
(str(self), (t1 - t0), self._residualNorm))
return self._residualNorm
def _solve_force(self, ksp):
if self._kwargs['getConvergenceHistory']:
ksp.setConvergenceHistory()
ksp.solve(self._velocity_petsc, self._force_petsc)
self._convergenceHistory = ksp.getConvergenceHistory()
else:
ksp.solve(self._velocity_petsc, self._force_petsc)
# reorder force from petsc index to normal index, and separate to each object.
t_force = self.vec_scatter(self._force_petsc, destroy=False)
tmp = []
for obj0 in self.get_obj_list():
_, f_glbIdx_all = obj0.get_f_geo().get_glbIdx()
obj0.set_force(t_force[f_glbIdx_all])
tmp.append(t_force[f_glbIdx_all])
self._force = np.hstack(tmp)
return True
def _resolve_velocity(self, ksp):
re_velocity_petsc = self._M_petsc.createVecLeft()
self._M_petsc.mult(self._force_petsc, re_velocity_petsc)
self._re_velocity = self.vec_scatter(re_velocity_petsc)
for obj0 in self.get_all_obj_list():
_, u_glbIdx_all = obj0.get_u_geo().get_glbIdx()
obj0.set_re_velocity(self._re_velocity[u_glbIdx_all])
self._finish_solve = True
return ksp.getResidualNorm()
def solve_obj_u(self, obj: 'StokesFlowObj', INDEX=''):
"""
solve velocity for given object.
"""
self.check_finish_solve()
kwargs = self._kwargs
n_node_threshold = kwargs['n_node_threshold']
        # partition the object into several parts if it contains too many nodes, then solve in a loop.
sub_obj_list = uniqueList()
n_obj_nodes = obj.get_n_u_node()
n_sub_obj = int(obj.get_n_u_node() / n_node_threshold) + 1
obj_nodes = obj.get_u_nodes()
for i0 in range(n_sub_obj):
sub_obj1 = obj_dic[self.get_kwargs()['matrix_method']]()
sub_geo1 = base_geo()
id0 = i0 * n_obj_nodes // n_sub_obj
id1 = (i0 + 1) * n_obj_nodes // n_sub_obj
sub_geo1.set_dof(sub_obj1.get_n_unknown())
sub_geo1.set_nodes(obj_nodes[id0:id1], resetVelocity=True,
deltalength=obj.get_u_geo().get_deltaLength())
sub_obj_kwargs = {'name': '%s_sub_%d' % (str(obj), i0)}
sub_obj1.set_data(sub_geo1, sub_geo1, **sub_obj_kwargs)
sub_obj_list.append(sub_obj1)
obj_u = obj.get_velocity().copy()
n_obj = len(sub_obj_list)
for i1, sub_obj1 in enumerate(sub_obj_list):
sub_u_dmda = sub_obj1.get_u_geo().get_dmda()
sub_u_pkg = PETSc.DMComposite().create(comm=PETSc.COMM_WORLD)
sub_u_pkg.addDM(sub_u_dmda)
sub_u_pkg.setFromOptions()
sub_u_pkg.setUp()
sub_u_isglb = sub_u_pkg.getGlobalISs()
sub_obj_u_petsc = sub_u_dmda.createGlobalVector()
sub_obj1.get_u_geo().set_glbIdx(sub_u_isglb[0].getIndices())
m_petsc = PETSc.Mat().create(comm=PETSc.COMM_WORLD)
m_petsc.setSizes((sub_obj_u_petsc.getSizes(), self._force_petsc.getSizes()))
m_petsc.setType('dense')
m_petsc.setFromOptions()
m_petsc.setUp()
INDEX = ' %d/%d, ' % (i1 + 1, n_obj) + INDEX
self._create_matrix_obj(sub_obj1, m_petsc, INDEX)
# sub_obj_u_petsc.set(0)
m_petsc.mult(self._force_petsc, sub_obj_u_petsc)
sub_obj_u = self.vec_scatter(sub_obj_u_petsc)
id0 = i1 * n_obj_nodes // n_sub_obj * 3
id1 = (i1 + 1) * n_obj_nodes // n_sub_obj * 3
obj_u[id0 + 0:id1:3] = sub_obj_u[0::self.get_n_unknown()]
obj_u[id0 + 1:id1:3] = sub_obj_u[1::self.get_n_unknown()]
obj_u[id0 + 2:id1:3] = sub_obj_u[2::self.get_n_unknown()]
m_petsc.destroy()
sub_u_pkg.destroy()
sub_obj1.get_u_geo().destroy_dmda()
return obj_u
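# Usage sketch (cf. vtk_velocity below); names are illustrative and pts is an (n, 3) array:
#   tgeo = base_geo(); tgeo.set_nodes(pts, resetVelocity=True, deltalength=0)
#   tobj = StokesFlowObj(); tobj.set_data(tgeo, tgeo)
#   u = problem.solve_obj_u(tobj)   # velocity of the solved flow at pts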
def vtk_check(self, filename: str, obj: 'StokesFlowObj', ref_slt=None):
self.check_finish_solve()
obj_tube = list(tube_flatten((obj,)))
err = []
for obj in obj_tube:
if isinstance(obj, StokesFlowObj):
err.append(self._vtk_check(filename + '_' + str(obj) + '_check',
obj, ref_slt=ref_slt, INDEX=str(obj)))
else:
err_msg = 'unknown obj type. '
raise TypeError(err_msg)
return tube_flatten((err,))
def _vtk_check(self, filename: str, obj: "StokesFlowObj", ref_slt: "slt" = None, INDEX=''):
"""
check velocity at the surface of objects.
:type filename: str
:param filename: output file name
:type obj: StokesFlowObj
:param obj: object to check (one whose exact velocity information is known)
:param ref_slt: reference solution function handle
:return: none.
"""
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
obj_u = self.solve_obj_u(obj, INDEX)
obj.set_re_velocity(obj_u)
if ref_slt is None:
u_exact = obj.get_velocity()
else:
u_exact = ref_slt.get_solution(obj.get_u_geo())
if rank == 0:
velocity_x = obj_u[0::3].copy()
velocity_y = obj_u[1::3].copy()
velocity_z = obj_u[2::3].copy()
velocity_err = u_exact - obj_u
velocity_err_x = velocity_err[0::3].copy()
velocity_err_y = velocity_err[1::3].copy()
velocity_err_z = velocity_err[2::3].copy()
rel_err_x = np.abs(velocity_err_x / velocity_x)
rel_err_y = np.abs(velocity_err_y / velocity_y)
rel_err_z = np.abs(velocity_err_z / velocity_z)
nodes = obj.get_u_nodes()
pointsToVTK(filename, nodes[:, 0].copy(), nodes[:, 1].copy(), nodes[:, 2].copy(),
data={"velocity_err": (velocity_err_x, velocity_err_y, velocity_err_z),
"velocity": (velocity_x, velocity_y, velocity_z),
"rel_err": (rel_err_x, rel_err_y, rel_err_z)})
errorall = np.sqrt(np.sum((obj_u - u_exact) ** 2) / np.sum(u_exact ** 2))
errorx = np.sqrt(np.sum((obj_u[0::3] - u_exact[0::3]) ** 2) / np.sum(u_exact[0::3] ** 2))
errory = np.sqrt(np.sum((obj_u[1::3] - u_exact[1::3]) ** 2) / np.sum(u_exact[1::3] ** 2))
errorz = np.sqrt(np.sum((obj_u[2::3] - u_exact[2::3]) ** 2) / np.sum(u_exact[2::3] ** 2))
error = np.hstack((errorall, errorx, errory, errorz))
return error
def check_vtk_velocity(self):
field_range = self._kwargs['field_range']
n_grid = self._kwargs['n_grid']
n_range = field_range.shape
if n_range[0] > n_range[1]:
field_range = field_range.transpose()
n_range = field_range.shape
if n_range != (2, 3):
err_msg = 'maximum and minimum coordinates for the rectangular velocity field are necessary, ' + \
'i.e. range = [[0,0,0],[10,10,10]]. '
raise ValueError(err_msg)
self.check_finish_solve()
n_grid = n_grid.ravel()
if n_grid.shape != (3,):
err_msg = 'mesh number of each axis for the rectangular velocity field is necessary, ' + \
'i.e. n_grid = [100, 100, 100]. '
raise ValueError(err_msg)
return field_range, n_grid
def vtk_velocity(self, filename: str):
t0 = time()
self.check_finish_solve()
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
field_range, n_grid = self.check_vtk_velocity()
region_type = self._kwargs['region_type']
myregion = region()
full_region_x, full_region_y, full_region_z = \
myregion.type[region_type](field_range, n_grid)
# to handle big problems, solve the velocity field slice by slice along the x axis.
if rank == 0:
u_x = np.zeros((n_grid[0], n_grid[1], n_grid[2]))
u_y = np.zeros((n_grid[0], n_grid[1], n_grid[2]))
u_z = np.zeros((n_grid[0], n_grid[1], n_grid[2]))
else:
u_x = None
u_y = None
u_z = None
obj0 = StokesFlowObj()
for i0 in range(full_region_x.shape[0]):
temp_x = full_region_x[i0]
temp_y = full_region_y[i0]
temp_z = full_region_z[i0]
temp_nodes = np.c_[temp_x.ravel(), temp_y.ravel(), temp_z.ravel()]
temp_geo = base_geo()
temp_geo.set_nodes(temp_nodes, resetVelocity=True, deltalength=0)
obj0.set_data(temp_geo, temp_geo)
u = self.solve_obj_u(obj0)
if rank == 0:
u_x[i0, :, :] = u[0::3].reshape((n_grid[1], n_grid[2]))
u_y[i0, :, :] = u[1::3].reshape((n_grid[1], n_grid[2]))
u_z[i0, :, :] = u[2::3].reshape((n_grid[1], n_grid[2]))
else:
u_x = None
u_y = None
u_z = None
if rank == 0:
# output data
gridToVTK(filename, full_region_x, full_region_y, full_region_z,
pointData={"velocity": (u_x, u_y, u_z)})
t1 = time()
PETSc.Sys.Print('%s: write vtk files of surrounding velocity use: %fs'
% (str(self), (t1 - t0)))
return True
def vtk_self(self, filename, stp_idx=0):
t0 = time()
self.check_finish_solve()
obj0 = obj_dic[self.get_kwargs()['matrix_method']]()
obj0.combine(self.get_all_obj_list(), set_re_u=True, set_force=True)
obj0.set_name('Prb')
obj0.set_matrix_method(self.get_kwargs()['matrix_method'])
# self.show_velocity()
# obj0.show_velocity()
obj0.vtk(filename, stp_idx)
t1 = time()
PETSc.Sys.Print('%s: write self vtk files use: %fs' % (str(self), (t1 - t0)))
return True
def vtk_obj(self, filename, stp_idx=0):
self.check_finish_solve()
for obj1 in self._obj_list:
obj1.vtk(filename, stp_idx)
return True
def vtk_tetra(self, filename: str, bgeo: base_geo):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
t0 = time()
self.check_finish_solve()
bnodes = bgeo.get_nodes()
belems, elemtype = bgeo.get_mesh()
err_msg = 'mesh type is NOT tetrahedron. '
assert elemtype == 'tetra', err_msg
obj1 = StokesFlowObj()
obj1.set_data(bgeo, bgeo)
u = self.solve_obj_u(obj1)
if rank == 0:
u = np.array(u).reshape(bnodes.shape)
vtk = VtkData(UnstructuredGrid(bnodes, tetra=belems, ),
PointData(Vectors(u, name='u')),
str(self))
vtk.tofile(filename)
t1 = time()
PETSc.Sys.Print(
'export to %s.vtk, element type is %s, contain %d nodes and %d elements using %fs. '
% (filename, elemtype, bnodes.shape[0], belems.shape[0], (t1 - t0)))
return True
def saveM_ASCII(self, filename: str = '..', ):
if filename[-4:] != '.txt':
filename = filename + '.txt'
err_msg = 'M matrix has been destroyed. '
assert not self._M_destroyed, err_msg
viewer = PETSc.Viewer().createASCII(filename, 'w', comm=PETSc.COMM_WORLD)
viewer(self._M_petsc)
viewer.destroy()
PETSc.Sys.Print('%s: save M matrix to %s' % (str(self), filename))
return True
def saveM_HDF5(self, filename: str = '..', ):
if filename[-3:] != '.h5':
filename = filename + '.h5'
err_msg = 'M matrix has been destroyed. '
assert not self._M_destroyed, err_msg
viewer = PETSc.Viewer().createHDF5(filename, 'w', comm=PETSc.COMM_WORLD)
viewer(self._M_petsc)
viewer.destroy()
PETSc.Sys.Print('%s: save M matrix to %s' % (str(self), filename))
return True
def _save_M_mat_dict(self, M_dict, obj):
t_name_all = str(obj) + '_Idx_all'
t_name = str(obj) + '_Idx'
u_glbIdx, u_glbIdx_all = obj.get_u_geo().get_glbIdx()
M_dict[t_name_all] = u_glbIdx_all
M_dict[t_name] = u_glbIdx
return True
def saveM_mat(self, filename: str = '..', M_name='M'):
if filename[-4:] == '.mat':
filename = filename[:-4]
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
size = comm.Get_size()
err_msg = 'M matrix has been destroyed. '
assert not self._M_destroyed, err_msg
M_dict = {M_name: self._M_petsc.getDenseArray(), }
for obj in self.get_obj_list():
self._save_M_mat_dict(M_dict, obj)
savemat(filename + '_rank%03d.mat' % rank,
M_dict,
oned_as='column')
PETSc.Sys.Print(
'%s: save M matrix to %s_rank(%03d~%03d).mat' % (str(self), filename, 0, size))
return True
def saveF_ASCII(self, filename: str = '..', ):
if filename[-4:] != '.txt':
filename = filename + '.txt'
viewer = PETSc.Viewer().createASCII(filename, 'w', comm=PETSc.COMM_WORLD)
viewer(self._force_petsc)
viewer.destroy()
PETSc.Sys.Print('%s: save force to %s' % (str(self), filename))
return True
def saveV_ASCII(self, filename: str = '..', ):
if filename[-4:] != '.txt':
filename = filename + '.txt'
viewer = PETSc.Viewer().createASCII(filename, 'w', comm=PETSc.COMM_WORLD)
viewer(self._velocity_petsc)
viewer.destroy()
PETSc.Sys.Print('%s: save velocity to %s' % (str(self), filename))
return True
def saveF_Binary(self, filename: str = '..', ):
if filename[-4:] != '.bin':
filename = filename + '.bin'
viewer = PETSc.Viewer().createBinary(filename, 'w', comm=PETSc.COMM_WORLD)
viewer(self._force_petsc)
viewer.destroy()
PETSc.Sys.Print('%s: save force to %s' % (str(self), filename))
return True
def saveV_Binary(self, filename: str = '..', ):
if filename[-4:] != '.bin':
filename = filename + '.bin'
viewer = PETSc.Viewer().createBinary(filename, 'w', comm=PETSc.COMM_WORLD)
viewer(self._velocity_petsc)
viewer.destroy()
PETSc.Sys.Print('%s: save velocity to %s' % (str(self), filename))
return True
def saveM_Binary(self, filename: str = '..', ):
if filename[-4:] != '.bin':
filename = filename + '.bin'
err_msg = 'M matrix has been destroyed. '
assert not self._M_destroyed, err_msg
viewer = PETSc.Viewer().createBinary(filename, 'w', comm=PETSc.COMM_WORLD)
viewer.pushFormat(viewer.Format.NATIVE)
viewer(self._M_petsc)
viewer.destroy()
PETSc.Sys.Print('%s: save M matrix to %s' % (str(self), filename))
return True
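# The binary save/load pair lets the dense M matrix be reused across runs, e.g. (sketch)
# saveM_Binary('M') once the matrix is assembled and loadM_Binary('M') in a later run;
# note loadM_Binary assumes the force/velocity vectors already exist so the sizes match.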
def loadM_Binary(self, filename: str):
if filename[-4:] != '.bin':
filename = filename + '.bin'
viewer = PETSc.Viewer().createBinary(filename, 'r')
self._M_petsc = PETSc.Mat().create(comm=PETSc.COMM_WORLD)
self._M_petsc.setSizes((self._velocity_petsc.getSizes(), self._force_petsc.getSizes()))
self._M_petsc.setType('dense')
self._M_petsc.setFromOptions()
self._M_petsc = self._M_petsc.load(viewer)
return True
def mat_destroy(self):
if not self._M_destroyed:
self._M_petsc.destroy()
self._M_destroyed = True
self._M_petsc = None
return True
else:
return False
def destroy(self):
self._force_petsc.destroy()
self._velocity_petsc.destroy()
self._f_pkg.destroy()
self._u_pkg.destroy()
self._f_pkg = None
self._u_pkg = None
self._force_petsc = None
self._velocity_petsc = None
return True
def pickmyself_prepare(self):
self.destroy()
for obji in self.get_obj_list():
obji.pickmyself_prepare()
return True
def pickmyself(self, filename: str, ifcheck=False, pick_M=False, unpick=True,
mat_destroy=True):
t0 = time()
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
self._pick_filename = filename
if (not ifcheck) and pick_M:
self._pick_M = pick_M
err_msg = 'self._finish_solve=%s' % self._finish_solve
assert self._finish_solve, err_msg
self.saveM_Binary(filename + '_M')
self.pickmyself_prepare()
if mat_destroy:
self.mat_destroy()
else:
t_M_petsc = self._M_petsc
self._M_destroyed = True
self._M_petsc = None
if rank == 0:
with open(filename + '_pick.bin', 'wb') as output:
pickler = pickle.Pickler(output, -1)
pickler.dump(self)
if unpick or ifcheck:
self.unpick_myself()
if not mat_destroy:
self._M_destroyed = False
self._M_petsc = t_M_petsc
t1 = time()
PETSc.Sys().Print('%s: pick the problem use: %fs' % (str(self), (t1 - t0)))
return True
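# Restart sketch (names illustrative): after solve(),
#   prb.pickmyself('case1', pick_M=True)   # writes case1_pick.bin (and case1_M.bin)
# and in a restart run the pickled problem is loaded elsewhere and unpick_myself()
# re-creates the PETSc objects (and optionally reloads the M matrix).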
def _unpick_addDM(self, obj1):
self._f_pkg.addDM(obj1.get_f_geo().get_dmda())
self._u_pkg.addDM(obj1.get_u_geo().get_dmda())
return True
def _unpick_set_force(self):
f_numpy = []
f_glbIdx = []
for obj0 in self.get_obj_list():
if isinstance(obj0, ForceFreeComposite):
for sub_obj in obj0.get_obj_list():
_, f_glbIdx_all = sub_obj.get_f_geo().get_glbIdx()
f_numpy.append(sub_obj.get_force())
f_glbIdx.append(f_glbIdx_all)
_, f_glbIdx_all = obj0.get_f_glbIdx()
f_numpy.append(obj0.get_ref_U())
f_glbIdx.append(f_glbIdx_all)
else:
_, f_glbIdx_all = obj0.get_f_geo().get_glbIdx()
f_numpy.append(obj0.get_force())
f_glbIdx.append(f_glbIdx_all)
f_numpy = np.hstack(f_numpy)
f_glbIdx = np.hstack(f_glbIdx)
self._force_petsc[f_glbIdx] = f_numpy[:]
self._force_petsc.assemble()
return True
def unpick_myself(self, check_MPISIZE=True):
filename = self._pick_filename
OptDB = PETSc.Options()
kwargs = self._kwargs
comm = PETSc.COMM_WORLD.tompi4py()
MPISIZE = comm.Get_size()
err_msg = 'call pickmyself() before unpick_myself(), i.e. store the data first and reload it in restart mode. '
assert filename != '..', err_msg
if OptDB.getBool('check_MPISIZE', True) and check_MPISIZE:
err_msg = 'problem was picked with MPI size %d, current MPI size %d is wrong. ' \
% (kwargs['MPISIZE'], MPISIZE,)
assert kwargs['MPISIZE'] == MPISIZE, err_msg
else:
PETSc.Sys.Print('-->Warning, make sure the mpi size %d is correct. ' % MPISIZE)
self._f_pkg = PETSc.DMComposite().create(comm=PETSc.COMM_WORLD)
self._u_pkg = PETSc.DMComposite().create(comm=PETSc.COMM_WORLD)
self._M_petsc = PETSc.Mat().create(comm=PETSc.COMM_WORLD) # M matrix
for obj1 in self.get_obj_list():
obj1.unpick_myself()
self._unpick_addDM(obj1)
self._f_pkg.setFromOptions()
self._u_pkg.setFromOptions()
# Todo: setUp f_pkg and u_pkg at an appropriate time
# self._f_pkg.setUp()
# self._u_pkg.setUp()
if self._finish_solve:
self.create_F_U()
self._unpick_set_force()
# self._force_petsc.view()
if self._finish_solve and self._pick_M:
self.loadM_Binary(filename + '_M')
PETSc.Sys.Print('Unpick the problem from %s. ' % filename)
return True
def view_log_M(self, **kwargs):
m = self._M_petsc.getDenseArray()
view_args = {
'vmin': -10,
'vmax': 0,
'title': 'log10_abs_' + kwargs['matrix_method'],
'cmap': 'gray'
}
self._view_matrix(np.log10(np.abs(m) + 1e-100), **view_args)
def view_M(self, **kwargs):
m = self._M_petsc.getDenseArray()
view_args = {
'vmin': None,
'vmax': None,
'title': kwargs['matrix_method'],
'cmap': 'gray'
}
self._view_matrix(m, **view_args)
@staticmethod
def _view_matrix(m, **kwargs):
args = {
'vmin': None,
'vmax': None,
'title': ' ',
'cmap': None
}
for key, value in args.items():
if key in kwargs:
args[key] = kwargs[key]
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
cax = ax.matshow(m,
origin='lower',
vmin=args['vmin'],
vmax=args['vmax'],
cmap=plt.get_cmap(args['cmap']))
fig.colorbar(cax)
plt.title(args['title'])
plt.show()
def check_finish_solve(self):
err_msg = 'call solve() method first.'
assert self._finish_solve, err_msg
return True
def print_info(self):
if self._kwargs['plot_geo']:
self.show_f_u_nodes(linestyle='-')
OptDB = PETSc.Options()
if OptDB.getBool('plot_vel', 0):
length_factor = OptDB.getReal('vel_fct', 1)
self.show_velocity(length_factor=length_factor)
for obj in self.get_obj_list():
obj.print_info()
PETSc.Sys.Print('%s: force nodes: %d, velocity nodes: %d'
% (str(self), self.get_n_f_node(), self.get_n_u_node()))
def get_M(self):
err_msg = 'this method must be called before vtk_velocity(), since the latter destroys the M matrix. '
assert not self._M_destroyed, err_msg
M = self._M_petsc.getDenseArray().copy()
return M
def get_M_petsc(self):
return self._M_petsc
def get_n_f_node(self):
return self._n_fnode
def get_n_u_node(self):
return self._n_unode
def get_n_force(self):
return self._force_petsc.getSizes()[1]
def get_n_velocity(self):
return self._velocity_petsc.getSizes()[1]
def get_obj_list(self):
return self._obj_list
def get_all_obj_list(self):
return self.get_obj_list()
def get_n_obj(self):
return len(self._obj_list)
def dbg_get_U(self):
return self.vec_scatter(self._velocity_petsc, destroy=False)
def get_force(self):
return self._force
def get_force_x(self):
return self._force[0::self._n_unknown]
def get_force_y(self):
return self._force[1::self._n_unknown]
def get_force_z(self):
return self._force[2::self._n_unknown]
def get_total_force(self, center=np.zeros(3)):
F = np.zeros(6)
for obj0 in self.get_all_obj_list():
assert isinstance(obj0, StokesFlowObj)
F = F + obj0.get_total_force(center=center)
return F
def set_kwargs(self, **kwargs):
self._kwargs = kwargs # kwargs associate with solving method,
self._init_kwargs(**kwargs)
return True
def get_force_petsc(self):
return self._force_petsc
def get_n_unknown(self):
return self._n_unknown
def get_kwargs(self):
return self._kwargs
def get_matrix_method(self):
return self._kwargs['matrix_method']
def get_residualNorm(self):
self.check_finish_solve()
return self._residualNorm
def get_convergenceHistory(self):
self.check_finish_solve()
return self._convergenceHistory
def get_name(self):
return self._kwargs['fileHandle']
@staticmethod
def vec_scatter(vec_petsc, destroy=True):
scatter, temp = PETSc.Scatter().toAll(vec_petsc)
scatter.scatterBegin(vec_petsc, temp, False, PETSc.Scatter.Mode.FORWARD)
scatter.scatterEnd(vec_petsc, temp, False, PETSc.Scatter.Mode.FORWARD)
vec = temp.getArray()
if destroy:
vec_petsc.destroy()
return vec
def show_velocity(self, length_factor=1, show_nodes=True):
geo_list = uniqueList()
for obj1 in self.get_obj_list():
geo_list.append(obj1.get_u_geo())
temp_geo = base_geo()
temp_geo.combine(geo_list)
temp_geo.show_velocity(length_factor=length_factor, show_nodes=show_nodes)
return True
def show_force(self, length_factor=1, show_nodes=True):
geo_list = uniqueList()
for obj1 in self.get_obj_list():
geo_list.append(obj1.get_f_geo())
temp_geo = base_geo()
temp_geo.combine(geo_list)
temp_geo.set_velocity(self._force)
temp_geo.show_velocity(length_factor=length_factor, show_nodes=show_nodes)
return True
def show_f_nodes(self, linestyle='-'):
geo_list = uniqueList()
for obj1 in self.get_all_obj_list():
geo_list.append(obj1.get_f_geo())
temp_geo = base_geo()
temp_geo.combine(geo_list)
temp_geo.show_nodes(linestyle)
return True
def show_u_nodes(self, linestyle='-'):
geo_list = uniqueList()
for obj1 in self.get_all_obj_list():
geo_list.append(obj1.get_u_geo())
temp_geo = base_geo()
temp_geo.combine(geo_list)
temp_geo.show_nodes(linestyle)
return True
def show_f_u_nodes(self, linestyle='-'):
f_geo_list = uniqueList()
u_geo_list = uniqueList()
for obj1 in self.get_all_obj_list():
f_geo_list.append(obj1.get_f_geo())
if obj1.get_f_geo() is not obj1.get_u_geo():
u_geo_list.append(obj1.get_u_geo())
f_geo = base_geo()
f_geo.combine(f_geo_list)
u_geo = base_geo()
u_geo.combine(u_geo_list)
temp_geo = geoComposit()
temp_geo.append(u_geo)
temp_geo.append(f_geo)
temp_geo.show_nodes(linestyle)
return True
def update_location(self, eval_dt, print_handle=''):
for obj0 in self.get_obj_list():
obj0.update_location(eval_dt, print_handle)
return True
class StokesFlowObj:
# general object class; contains the common properties of an object.
def __init__(self):
self._index = -1 # index of object
self._f_geo = base_geo() # global coordinates of force nodes
self._u_geo = base_geo() # global coordinates of velocity nodes
self._re_velocity = np.zeros([0]) # resolved information
self._force = np.zeros([0]) # force information
self._type = 'uninitialized' # object type
self._name = '...' # object name
self._n_unknown = 3
self._problem = None
self._matrix_method = None
# the following properties store the location history of the composite.
# currently this kind of object does not move;
# the center is fixed at u_geo.center().
self.obj_norm_hist = []
# self._locomotion_fct = np.ones(3)
# self._center_hist = []
# self._U_hist = [] # (ux,uy,uz,wx,wy,wz)
# self._displace_hist = []
# self._rotation_hist = []
def __repr__(self):
return self.get_obj_name()
def __str__(self):
return self.get_name()
def print_info(self):
PETSc.Sys.Print(' %s: father %s, type %s, index %d, force nodes %d, velocity nodes %d'
% (self.get_name(), self._problem.get_name(), self._type, self.get_index(),
self.get_n_f_node(), self.get_n_u_node()))
self.get_u_geo().print_info()
self.get_f_geo().print_info()
return True
def save_mat(self, addInfo=''):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
u_glbIdx, u_glbIdx_all = self.get_u_geo().get_glbIdx()
f_glbIdx, f_glbIdx_all = self.get_f_geo().get_glbIdx()
filename = addInfo + self._problem.get_name() + '_' + self.get_name() + '.mat'
if rank == 0:
savemat(filename,
{'fnodes': self.get_f_geo().get_nodes(),
'unodes': self.get_u_geo().get_nodes(),
'u_glbIdx': u_glbIdx,
'u_glbIdx_all': u_glbIdx_all,
'f_glbIdx': f_glbIdx,
'f_glbIdx_all': f_glbIdx_all,
'force': self._force,
're_velocity': self._re_velocity,
'velocity': self.get_u_geo().get_velocity(), },
oned_as='column')
PETSc.Sys.Print('%s: save information to %s' % (str(self), filename))
return True
def set_data(self, f_geo: base_geo, u_geo: base_geo, name='...', **kwargs):
# err_msg = 'f_geo and u_geo need geo objects contain force and velocity nodes, respectively. '
# assert isinstance(f_geo, base_geo) and isinstance(u_geo, base_geo), err_msg
self._f_geo = f_geo
self._u_geo = u_geo
self._force = np.zeros(self.get_n_f_node() * self.get_n_unknown())
self._re_velocity = np.zeros(self.get_u_nodes().size)
self._name = name
self._type = 'general obj'
return True
def set_velocity(self, velocity: np.array):
return self.get_u_geo().set_velocity(velocity)
def set_rigid_velocity(self, *args, **kwargs):
return self.get_u_geo().set_rigid_velocity(*args, **kwargs)
def get_problem(self) -> StokesFlowProblem:
return self._problem
def set_problem(self, problem: 'StokesFlowProblem'):
self._problem = problem
return True
def get_matrix_method(self):
return self._matrix_method
def set_matrix_method(self, matrix_method, **kwargs):
self._matrix_method = matrix_method
return True
def copy(self):
"""
make a copy of the current object.
"""
problem = self._problem
self._problem = None
self.get_f_geo().destroy_dmda()
if self.get_f_geo() is not self.get_u_geo():
self.get_u_geo().destroy_dmda()
obj2 = copy.deepcopy(self) # type: StokesFlowObj
self.set_problem(problem)
obj2.set_problem(problem)
obj2.set_index(-1)
self.get_f_geo().set_dmda()
if self.get_f_geo() is not self.get_u_geo():
self.get_u_geo().set_dmda()
obj2.get_f_geo().set_dmda()
if obj2.get_f_geo() is not obj2.get_u_geo():
obj2.get_u_geo().set_dmda()
return obj2
def combine(self, obj_list: uniqueList, set_re_u=False, set_force=False,
geo_fun=base_geo):
obj_list = list(tube_flatten((obj_list,)))
fgeo_list = uniqueList()
ugeo_list = uniqueList()
for obj0 in obj_list:
err_msg = 'some object(s) in obj_list are not StokesFlowObj instances: %s' % \
type(obj0)
assert isinstance(obj0, StokesFlowObj), err_msg
fgeo_list.append(obj0.get_f_geo())
ugeo_list.append(obj0.get_u_geo())
fgeo = geo_fun()
ugeo = geo_fun()
fgeo.combine(fgeo_list)
ugeo.combine(ugeo_list)
self.set_data(fgeo, ugeo, name=self.get_name())
if set_re_u:
self.set_re_velocity(np.zeros([0]))
for obj0 in obj_list:
self.set_re_velocity(np.hstack((self.get_re_velocity(), obj0.get_re_velocity())))
if set_force:
self.set_force(np.zeros([0]))
for obj0 in obj_list:
self.set_force(np.hstack((self.get_force(), obj0.get_force())))
return True
def move(self, displacement):
self.get_f_geo().move(displacement)
if self.get_f_geo() is not self.get_u_geo():
self.get_u_geo().move(displacement)
return True
def node_rotation(self, norm=np.array([0, 0, 1]), theta=0, rotation_origin=None):
self.get_f_geo().node_rotation(norm, theta, rotation_origin)
if self.get_f_geo() is not self.get_u_geo():
self.get_u_geo().node_rotation(norm, theta, rotation_origin)
return True
def zoom(self, factor, zoom_origin=None):
self.get_f_geo().node_zoom(factor, zoom_origin=zoom_origin)
if self.get_f_geo() is not self.get_u_geo():
self.get_u_geo().node_zoom(factor, zoom_origin=zoom_origin)
return True
def get_index(self):
return self._index
def get_type(self):
return self._type
def get_obj_name(self):
return self._type + ' (index %d)' % self._index
def get_name(self):
return self._name
def set_name(self, name):
self._name = name
return True
def set_index(self, new_index):
self._index = new_index
return True
def get_f_nodes(self):
return self._f_geo.get_nodes()
def get_u_nodes(self):
return self._u_geo.get_nodes()
def get_force(self):
return self._force
def get_force_x(self):
return self._force[0::self._n_unknown]
def get_force_y(self):
return self._force[1::self._n_unknown]
def get_force_z(self):
return self._force[2::self._n_unknown]
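# total force/torque about `center`: F = sum_i f_i, T = sum_i (r_i - center) x f_i,
# returned stacked as (Fx, Fy, Fz, Tx, Ty, Tz).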
def get_total_force(self, center=None):
if center is None:
center = self.get_u_geo().get_origin()
f = self.get_force().reshape((-1, self.get_n_unknown()))
r = self.get_f_geo().get_nodes() - center
t = np.cross(r, f[:, :3]) # some solve methods may have additional degrees of freedom.
f_t = np.hstack((f, t)).sum(axis=0)
return f_t
def set_force(self, force):
self._force = force
def get_re_velocity(self):
return self._re_velocity
def get_velocity(self):
return self.get_u_geo().get_velocity()
def set_re_velocity(self, re_velocity):
self._re_velocity = re_velocity
def get_n_f_node(self):
return self.get_f_nodes().shape[0]
def get_n_u_node(self):
return self.get_u_nodes().shape[0]
def get_n_velocity(self):
return self.get_u_nodes().shape[0] * self._n_unknown
def get_n_force(self):
return self.get_f_nodes().shape[0] * self._n_unknown
def get_n_unknown(self):
return self._n_unknown
def get_f_geo(self) -> base_geo:
return self._f_geo
def get_u_geo(self) -> base_geo:
return self._u_geo
@staticmethod
def vec_scatter(vec_petsc, destroy=True):
scatter, temp = PETSc.Scatter().toAll(vec_petsc)
scatter.scatter(vec_petsc, temp, False, PETSc.Scatter.Mode.FORWARD)
vec = temp.getArray()
if destroy:
vec_petsc.destroy()
return vec
def vtk(self, filename, stp_idx=0):
if str(self) == '...':
return
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
if rank == 0:
force_x = self._force[0::self._n_unknown].copy()
force_y = self._force[1::self._n_unknown].copy()
force_z = self._force[2::self._n_unknown].copy()
velocity_x = self._re_velocity[0::self._n_unknown].copy()
velocity_y = self._re_velocity[1::self._n_unknown].copy()
velocity_z = self._re_velocity[2::self._n_unknown].copy()
velocity_err_x = np.abs(self._re_velocity[0::self._n_unknown]
- self.get_velocity()[0::3])
velocity_err_y = np.abs(self._re_velocity[1::self._n_unknown]
- self.get_velocity()[1::3])
velocity_err_z = np.abs(self._re_velocity[2::self._n_unknown]
- self.get_velocity()[2::3])
if 'rs' in self.get_matrix_method():
filename = '%s_%s_t%05d' % (filename, str(self), stp_idx)
pointsToVTK(filename, self.get_f_nodes()[:, 0], self.get_f_nodes()[:, 1],
self.get_f_nodes()[:, 2],
data={"force": (force_x, force_y, force_z),
"velocity": (velocity_x, velocity_y, velocity_z),
"velocity_err": (velocity_err_x,
velocity_err_y,
velocity_err_z), })
else:
f_filename = '%s_%s_force_t%05d' % (filename, str(self), stp_idx)
pointsToVTK(f_filename, self.get_f_nodes()[:, 0], self.get_f_nodes()[:, 1],
self.get_f_nodes()[:, 2],
data={"force": (force_x, force_y, force_z), })
u_filename = '%s_%s_velocity_t%05d' % (filename, str(self), stp_idx)
pointsToVTK(u_filename, self.get_u_nodes()[:, 0], self.get_u_nodes()[:, 1],
self.get_u_nodes()[:, 2],
data={"velocity": (velocity_x, velocity_y, velocity_z),
"velocity_err": (velocity_err_x,
velocity_err_y,
velocity_err_z), })
return True
def show_velocity(self, length_factor=1, show_nodes=True):
self.get_u_geo().show_velocity(length_factor, show_nodes)
return True
def show_re_velocity(self, length_factor=1, show_nodes=True):
self.get_problem().check_finish_solve()
tgeo = self.get_u_geo().copy()
tgeo.set_velocity(self._re_velocity)
tgeo.show_velocity(length_factor, show_nodes)
return True
def show_force(self, length_factor=1, show_nodes=True):
self.get_problem().check_finish_solve()
tgeo = self.get_f_geo().copy()
tgeo.set_velocity(self._force)
tgeo.show_velocity(length_factor, show_nodes)
return True
def show_f_nodes(self, linestyle='-'):
self.get_f_geo().show_nodes(linestyle)
return True
def show_u_nodes(self, linestyle='-'):
self.get_u_geo().show_nodes(linestyle)
return True
def show_f_u_nodes(self, linestyle='-'):
temp_geo = geoComposit()
temp_geo.append(self.get_u_geo())
if self.get_u_geo() is not self.get_f_geo():
temp_geo.append(self.get_f_geo())
temp_geo.show_nodes(linestyle)
return True
def pickmyself_prepare(self):
self.get_f_geo().pickmyself_prepare()
self.get_u_geo().pickmyself_prepare()
return True
def unpick_myself(self):
self.get_u_geo().set_dmda()
self.get_f_geo().set_dmda()
return True
def update_location(self, eval_dt, print_handle=''):
self.obj_norm_hist.append(self.get_u_geo().get_geo_norm())
return True
def get_obj_norm_hist(self):
return self.obj_norm_hist
class StokesFlowRingObj(StokesFlowObj):
def check_nodes(self, nodes):
err_msg = 'nodes must be distributed on the line (r, 0, z). '
assert np.allclose(nodes[:, 1], 0), err_msg
return True
def set_data(self, f_geo: base_geo, u_geo: base_geo, name='...', **kwargs):
self.check_nodes(f_geo.get_nodes())
self.check_nodes(u_geo.get_nodes())
return super().set_data(f_geo, u_geo, name, **kwargs)
def get_total_force_sum(self, center=None):
n_c = self.get_problem().get_kwargs()['n_c']
f_t = super().get_total_force(center) * n_c
return f_t
def get_total_force_int(self, center=None):
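# axisymmetric integral over the full ring: weight each nodal force by its radius r
# (the Jacobian of the angular integration) and multiply the summed result by 2*pi.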
if center is None:
center = self.get_u_geo().get_origin()
fnodes = self.get_f_nodes()
rf = np.vstack((fnodes[:, 0], fnodes[:, 0], fnodes[:, 0],)).T
f = self.get_force().reshape((-1, self.get_n_unknown())) * rf
r = self.get_f_geo().get_nodes() - center
t = np.cross(r, f[:, :3]) # some solve methods may have additional degrees of freedom.
f_t = np.hstack((f, t)).sum(axis=0) * 2 * np.pi
return f_t
def get_total_force(self, center=None):
return self.get_total_force_int(center)
# return self.get_total_force_sum(center)
class StokesletsRingObjFull(StokesFlowObj):
def __init__(self):
super().__init__()
self._n_c = -1 # number of copies about the symmetry axis
def set_data(self, f_geo: '_revolve_geo', u_geo: '_revolve_geo', name='...', **kwargs):
n_c = kwargs['n_c']
self._n_c = n_c
u_geo.create_full_geo(n_c)
f_geo.create_full_geo(n_c)
super().set_data(f_geo, u_geo, name, **kwargs)
def show_slice_force(self, idx=0, length_factor=1, show_nodes=True):
self.get_problem().check_finish_solve()
n_c = self._n_c
assert idx < n_c
tnodes = self.get_f_nodes()[idx::n_c, :]
tforce = self.get_force().reshape((-1, 3))[idx::n_c, :].flatten()
tgeo = base_geo()
tgeo.set_nodes(tnodes, self.get_f_geo().get_deltaLength())
tgeo.set_velocity(tforce)
tgeo.show_velocity(length_factor, show_nodes)
return True
class StokesletsInPipeProblem(StokesFlowProblem):
# pipe center line along z axis
def __init__(self, **kwargs):
from src.stokesletsInPipe import detail_light
super().__init__(**kwargs)
self._fpgeo = base_geo() # force geo of pipe
self._vpgeo = base_geo() # velocity geo of pipe
self._cpgeo = base_geo() # check geo of pipe
self._m_pipe = PETSc.Mat().create(comm=PETSc.COMM_WORLD)
self._m_pipe_check = PETSc.Mat().create(comm=PETSc.COMM_WORLD)
self._b_list = np.ones(0)
# create an empty matrix and empty velocity vectors in advance to avoid spending too much time on memory allocation.
# for numerical part
self._t_m = PETSc.Mat().create(
comm=PETSc.COMM_WORLD) # M matrix associated with the u1 part, i.e. the velocity due to the pipe boundary.
self._t_u11 = uniqueList() # a list containing three u1 components for f1, used for interpolation
self._t_u12 = uniqueList() # a list containing three u1 components for f2, used for interpolation
self._t_u13 = uniqueList() # a list containing three u1 components for f3, used for interpolation
self._set_f123()
self._stokeslet_m = PETSc.Mat().create(
comm=PETSc.COMM_WORLD) # M matrix associated with stokeslet singularity
self._t_u2 = PETSc.Vec().create(comm=PETSc.COMM_WORLD)
# for theoretical part
# DBG
OptDB = PETSc.Options()
dbg_threshold = OptDB.getReal('dbg_threshold', 10)
PETSc.Sys.Print('--------------------> DBG: dbg_threshold = %f' % dbg_threshold)
self._greenFun = detail_light(threshold=dbg_threshold)
self._greenFun.solve_prepare_light()
dbg_z_the_threshold = OptDB.getReal('dbg_z_the_threshold', np.inf)
PETSc.Sys.Print('--------------------> DBG: dbg_z_the_threshold = %f' % dbg_z_the_threshold)
self._z_the_threshold = dbg_z_the_threshold
self._f1_list = [] # list (one entry per b) of force distributions at (or outside) the pipe boundary for a unit point force along the x axis
self._f2_list = [] # list (one entry per b) of force distributions at (or outside) the pipe boundary for a unit point force along the y axis
self._f3_list = [] # list (one entry per b) of force distributions at (or outside) the pipe boundary for a unit point force along the z axis
self._residualNorm_list = [] # residual norms of f1, f2, and f3 for each b
self._err_list = [] # relative velocity errors evaluated on the check geometry
# # set values later
# self._dp = np.nan
# self._rp = np.nan
# self._lp = np.nan
# self._ep = np.nan
# self._th = np.nan
# self._with_cover = np.nan
self._stokesletsInPipe_pipeFactor = np.nan
def _set_f123(self):
# set point source vector f1, f2, f3.
fgeo = base_geo()
fgeo.set_nodes((0, 0, 0), deltalength=0)
t_f_pkg = PETSc.DMComposite().create(comm=PETSc.COMM_WORLD)
t_f_pkg.addDM(fgeo.get_dmda())
t_f_pkg.setFromOptions()
t_f_pkg.setUp()
f_isglb = t_f_pkg.getGlobalISs()
fgeo.set_glbIdx(f_isglb[0].getIndices())
f1_petsc = t_f_pkg.createGlobalVector()
f2_petsc = t_f_pkg.createGlobalVector()
f3_petsc = t_f_pkg.createGlobalVector()
f1_petsc[:] = (1, 0, 0)
f2_petsc[:] = (0, 1, 0)
f3_petsc[:] = (0, 0, 1)
f1_petsc.assemble()
f2_petsc.assemble()
f3_petsc.assemble()
t_f_pkg.destroy()
self._f123_petsc = [f1_petsc, f2_petsc, f3_petsc]
self._stokeslet_geo = fgeo
return True
def _check_add_obj(self, obj):
_, b, _ = obj.get_f_geo().get_polar_coord()
b_list = self.get_b_list()
b1 = np.max(b_list)
err_msg = 'b is out of maximum %f' % b1
assert all(b <= b1), err_msg
return True
def get_b_list(self):
return self._b_list
def get_residualNorm_list(self):
return self._residualNorm_list
def get_err_list(self):
return self._err_list
def set_b_list(self, b_list):
self._b_list = b_list
return True
def get_n_b(self):
return self._b_list.size
def get_fpgeo(self):
return self._fpgeo
def get_vpgeo(self):
return self._vpgeo
def debug_solve_stokeslets_b(self, b, node):
t_geo = base_geo()
t_geo.set_nodes(node, deltalength=0)
obj1 = StokesFlowObj()
obj1.set_data(t_geo, t_geo)
u_glbIdx = self._set_temp_var(obj1)
return self._solve_stokeslets_b_num(b, node, use_cart=True, u_glbIdx_all=u_glbIdx)
def debug_solve_u_pipe(self, pgeo, outputHandle, greenFun):
return self._solve_u1_pipe(pgeo, outputHandle, greenFun)
def debug_solve_stokeslets_fnode(self, fnode, geo1):
unodes = geo1.get_nodes()
obj1 = StokesFlowObj()
obj1.set_data(geo1, geo1)
t_u_glbIdx_all = self._set_temp_var(obj1)
u_fx_petsc, u_fy_petsc, u_fz_petsc = self._solve_stokeslets_fnode(fnode, unodes,
t_u_glbIdx_all)
obj1.set_velocity(u_fx_petsc.getArray())
obj1.show_velocity(show_nodes=False, length_factor=1)
obj1.set_velocity(u_fy_petsc.getArray())
obj1.show_velocity(show_nodes=False, length_factor=1)
obj1.set_velocity(u_fz_petsc.getArray())
obj1.show_velocity(show_nodes=False, length_factor=1)
return True
def _solve_u1_b_list(self, k, ugeo, use_cart=False):
# solve velocity component due to boundary as a function of b and u_node location. Here, b is in self._b_list.
# total u = u1 + u2, u1: force at (or outside) pipe boundary
kwargs = self.get_kwargs()
temp_m = self._t_m
temp_obj1 = StokesFlowObj()
temp_obj1.set_data(self._fpgeo, ugeo)
self._method_dict[kwargs['matrix_method']](temp_obj1, temp_obj1, temp_m, **kwargs)
temp_m.assemble()
temp_m = self._t_m
for i0, ID in enumerate(k):
f1 = self._f1_list[ID]
f2 = self._f2_list[ID]
f3 = self._f3_list[ID]
temp_m.mult(f1, self._t_u11[i0])
temp_m.mult(f2, self._t_u12[i0])
temp_m.mult(f3, self._t_u13[i0])
if not use_cart:
uphi, _, _ = ugeo.get_polar_coord()
# Transform to polar coord
ux1 = self._t_u11[i0][0::3].copy()
uy1 = self._t_u11[i0][1::3].copy()
uz1 = self._t_u11[i0][2::3].copy()
uR1 = np.cos(uphi) * ux1 + np.sin(uphi) * uy1
uPhi1 = -np.sin(uphi) * ux1 + np.cos(uphi) * uy1
self._t_u11[i0][:] = np.dstack((uR1, uPhi1, uz1)).flatten()
ux2 = self._t_u12[i0][0::3].copy()
uy2 = self._t_u12[i0][1::3].copy()
uz2 = self._t_u12[i0][2::3].copy()
uR2 = np.cos(uphi) * ux2 + np.sin(uphi) * uy2
uPhi2 = -np.sin(uphi) * ux2 + np.cos(uphi) * uy2
self._t_u12[i0][:] = np.dstack((uR2, uPhi2, uz2)).flatten()
ux3 = self._t_u13[i0][0::3].copy()
uy3 = self._t_u13[i0][1::3].copy()
uz3 = self._t_u13[i0][2::3].copy()
uR3 = np.cos(uphi) * ux3 + np.sin(uphi) * uy3
uPhi3 = -np.sin(uphi) * ux3 + np.cos(uphi) * uy3
self._t_u13[i0][:] = np.dstack((uR3, uPhi3, uz3)).flatten()
self._t_u11[i0].assemble()
self._t_u12[i0].assemble()
self._t_u13[i0].assemble()
return True
def _solve_stokeslets_b_num(self, b, unode_xyz, use_cart=False, u_glbIdx_all=[]):
from src.StokesFlowMethod import point_force_matrix_3d_petsc
# velocity due to the stokeslet.
kwargs = self.get_kwargs()
stokeslet_geo = self._stokeslet_geo
stokeslet_node = np.hstack((b, 0, 0))
stokeslet_geo.set_nodes(stokeslet_node, deltalength=0)
ugeo = base_geo()
ugeo.set_nodes(unode_xyz, deltalength=0)
ugeo.set_glbIdx_all(u_glbIdx_all)
obj1 = StokesFlowObj()
obj1.set_data(stokeslet_geo, ugeo)
stokeslet_m = self._stokeslet_m
point_force_matrix_3d_petsc(obj1, obj1, stokeslet_m, **kwargs)
stokeslet_m.assemble()
# velocity due to the boundary, via Lagrange interpolation
b_list = self.get_b_list()
clsID = min(range(len(b_list)), key=lambda i: abs(b_list[i] - b)) # index of closest b
u_petsc = []
if b_list[clsID] == b_list[0]: # top of the list
k = [0, 1, 2]
self._solve_u1_b_list(k, ugeo, use_cart)
elif b_list[clsID] == b_list[-1]: # bottom of the list
k = [-3, -2, -1]
self._solve_u1_b_list(k, ugeo, use_cart)
else:
k = [clsID - 1, clsID, clsID + 1]
self._solve_u1_b_list(k, ugeo, use_cart)
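# l1, l2, l3 below are the quadratic (3-point) Lagrange weights in b: the boundary-induced
# velocity is interpolated between the precomputed solutions at b_list[k[0]], b_list[k[1]], b_list[k[2]].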
l1 = ((b - b_list[k[1]]) * (b - b_list[k[2]])) / (
(b_list[k[0]] - b_list[k[1]]) * (b_list[k[0]] - b_list[k[2]]))
l2 = ((b - b_list[k[0]]) * (b - b_list[k[2]])) / (
(b_list[k[1]] - b_list[k[0]]) * (b_list[k[1]] - b_list[k[2]]))
l3 = ((b - b_list[k[0]]) * (b - b_list[k[1]])) / (
(b_list[k[2]] - b_list[k[0]]) * (b_list[k[2]] - b_list[k[1]]))
# ux
t_u1 = self._t_u11[0] * l1 + self._t_u11[1] * l2 + self._t_u11[
2] * l3 # velocity due to boundary.
stokeslet_m.mult(self._f123_petsc[0], self._t_u2)
u_petsc.append(t_u1 + self._t_u2)
# uy
t_u1 = self._t_u12[0] * l1 + self._t_u12[1] * l2 + self._t_u12[
2] * l3 # velocity due to boundary.
stokeslet_m.mult(self._f123_petsc[1], self._t_u2)
u_petsc.append(t_u1 + self._t_u2)
# uz
t_u1 = self._t_u13[0] * l1 + self._t_u13[1] * l2 + self._t_u13[
2] * l3 # velocity due to boundary.
stokeslet_m.mult(self._f123_petsc[2], self._t_u2)
u_petsc.append(t_u1 + self._t_u2)
return u_petsc
def _solve_stokeslets_b_the(self, b, unode_rpz, use_cart=False):
comm = PETSc.COMM_WORLD.tompi4py()
dmda_the = PETSc.DMDA().create(sizes=(unode_rpz.shape[0],), dof=3, stencil_width=0,
comm=PETSc.COMM_WORLD)
t_u_pkg = PETSc.DMComposite().create(comm=PETSc.COMM_WORLD)
t_u_pkg.addDM(dmda_the)
t_u_pkg.setFromOptions()
t_u_pkg.setUp()
u1 = t_u_pkg.createGlobalVector()
u2 = t_u_pkg.createGlobalVector()
u3 = t_u_pkg.createGlobalVector()
u_isglb = t_u_pkg.getGlobalISs()
u_glbIdx_all = np.hstack(comm.allgather(u_isglb[0].getIndices()))
t_u_pkg.destroy()
greenFun = self._greenFun
greenFun.set_b(b=b)
greenFun.solve_prepare_b()
t_dmda_range = range(dmda_the.getRanges()[0][0], dmda_the.getRanges()[0][1])
t_Vec_range = u1.getOwnershipRange()
for i0 in t_dmda_range:
R, phi, z = unode_rpz[i0]
t_u_glbIdx = u_glbIdx_all[i0 * 3]
sign_z = np.sign(z)
abs_z = np.abs(z)
uR1, uPhi1, uz1, uR2, uPhi2, uz2, uR3, uPhi3, uz3 = \
greenFun.solve_u_light(R, phi, abs_z)
u1[t_u_glbIdx:t_u_glbIdx + 3] = [uR1, uPhi1, sign_z * uz1]
u2[t_u_glbIdx:t_u_glbIdx + 3] = [uR2, uPhi2, sign_z * uz2]
u3[t_u_glbIdx:t_u_glbIdx + 3] = [sign_z * uR3, sign_z * uPhi3, uz3]
if use_cart:
phi = unode_rpz[t_dmda_range, 1]
t_Vec_range_x = range(t_Vec_range[0] + 0, t_Vec_range[1], 3)
t_Vec_range_y = range(t_Vec_range[0] + 1, t_Vec_range[1], 3)
t_ux1 = np.cos(phi) * u1[t_Vec_range_x] - np.sin(phi) * u1[t_Vec_range_y]
t_ux2 = np.cos(phi) * u2[t_Vec_range_x] - np.sin(phi) * u2[t_Vec_range_y]
t_ux3 = np.cos(phi) * u3[t_Vec_range_x] - np.sin(phi) * u3[t_Vec_range_y]
t_uy1 = np.sin(phi) * u1[t_Vec_range_x] + np.cos(phi) * u1[t_Vec_range_y]
t_uy2 = np.sin(phi) * u2[t_Vec_range_x] + np.cos(phi) * u2[t_Vec_range_y]
t_uy3 = np.sin(phi) * u3[t_Vec_range_x] + np.cos(phi) * u3[t_Vec_range_y]
u1[t_Vec_range_x] = t_ux1
u1[t_Vec_range_y] = t_uy1
u2[t_Vec_range_x] = t_ux2
u2[t_Vec_range_y] = t_uy2
u3[t_Vec_range_x] = t_ux3
u3[t_Vec_range_y] = t_uy3
u1.assemble()
u2.assemble()
u3.assemble()
u_petsc = (u1, u2, u3)
# PETSc.Sys.Print(unode_rpz.size)
return u_petsc
def _solve_stokeslets_fnode(self, fnode, unodes, u_glbIdx_all=[]):
fnode = fnode.reshape((1, 3))
unodes = unodes.reshape((-1, 3))
ugeo = base_geo()
ugeo.set_nodes(unodes, resetVelocity=True, deltalength=0)
uphi, urho, uz = ugeo.get_polar_coord()
fgeo = base_geo()
fgeo.set_nodes(fnode, resetVelocity=True, deltalength=0)
fphi, frho, fz = fgeo.get_polar_coord()
# calculate ux, uy, uz in the local coordinate system defined by fnode.
b = frho
R = urho
phi = uphi - fphi
x = R * np.cos(phi)
y = R * np.sin(phi)
z = uz - fz
t_node_xyz = np.vstack((x, y, z)).T
u_fx_petsc, u_fy_petsc, u_fz_petsc = \
self._solve_stokeslets_b_num(b, t_node_xyz, True, u_glbIdx_all)
temp1 = np.abs(z) > self._z_the_threshold
if any(temp1):
theIdx = np.dstack((temp1, temp1, temp1)).flatten()
t_node_rpz = np.vstack((R[temp1], phi[temp1], z[temp1])).T
u_glbIdx_the = u_glbIdx_all[theIdx]
u_fx_petsc_the, u_fy_petsc_the, u_fz_petsc_the = \
self._solve_stokeslets_b_the(b, t_node_rpz, True)
t_range = range(u_fx_petsc_the.getOwnershipRange()[0],
u_fx_petsc_the.getOwnershipRange()[1])
temp2 = np.dstack((z, z, z)).flatten()
temp3 = np.abs(temp2[u_glbIdx_the[t_range]])
t_factor = np.abs((temp3 - self._z_the_threshold) /
(self._lp / 2 - self._z_the_threshold))
u_fx_petsc[u_glbIdx_the[t_range]] = \
u_fx_petsc_the.getArray() * t_factor + \
u_fx_petsc[u_glbIdx_the[t_range]] * (1 - t_factor)
u_fy_petsc[u_glbIdx_the[t_range]] = \
u_fy_petsc_the.getArray() * t_factor + \
u_fy_petsc[u_glbIdx_the[t_range]] * (1 - t_factor)
u_fz_petsc[u_glbIdx_the[t_range]] = \
u_fz_petsc_the.getArray() * t_factor + \
u_fz_petsc[u_glbIdx_the[t_range]] * (1 - t_factor)
u_fx_loc = u_fx_petsc.getArray()
u_fy_loc = u_fy_petsc.getArray()
u_fz_loc = u_fz_petsc.getArray()
# shift to global coordinate
theta = np.arctan2(fnode[0, 1], fnode[0, 0])
T = np.array(((np.cos(theta), np.sin(theta), 0),
(-np.sin(theta), np.cos(theta), 0),
(0, 0, 1)))
Tinv = np.array(((np.cos(theta), -np.sin(theta), 0),
(np.sin(theta), np.cos(theta), 0),
(0, 0, 1)))
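# rotate the local solution (computed with fnode placed on the x axis) back to the global
# frame: T / Tinv are rotations about z by the azimuth theta of fnode, applied to both the
# velocity components and the force directions of the 3x3 velocity block.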
temp_loc = np.dstack(
(u_fx_loc.reshape((-1, 3)), u_fy_loc.reshape((-1, 3)), u_fz_loc.reshape((-1, 3))))
temp_glb = np.tensordot(Tinv, np.tensordot(temp_loc, T, axes=(2, 0)), axes=(1, 1))
u_fx_glb = np.dstack((temp_glb[0, :, 0], temp_glb[1, :, 0], temp_glb[2, :, 0])).flatten()
u_fy_glb = np.dstack((temp_glb[0, :, 1], temp_glb[1, :, 1], temp_glb[2, :, 1])).flatten()
u_fz_glb = np.dstack((temp_glb[0, :, 2], temp_glb[1, :, 2], temp_glb[2, :, 2])).flatten()
u_fx_petsc.setValues(range(u_fx_petsc.getOwnershipRange()[0],
u_fx_petsc.getOwnershipRange()[1]), u_fx_glb)
u_fy_petsc.setValues(range(u_fy_petsc.getOwnershipRange()[0],
u_fy_petsc.getOwnershipRange()[1]), u_fy_glb)
u_fz_petsc.setValues(range(u_fz_petsc.getOwnershipRange()[0],
u_fz_petsc.getOwnershipRange()[1]), u_fz_glb)
return u_fx_petsc, u_fy_petsc, u_fz_petsc
def _check_f_accuracy(self, b, greenFun, waitBar=np.array((1, 1)), **kwargs):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
fileHandle = self._kwargs['fileHandle']
cpgeo = self._cpgeo
fpgeo = self._fpgeo
outputHandle = 'check'
a_u11, a_u21, a_u31 = self._solve_u1_pipe(cpgeo, outputHandle, greenFun, waitBar)
m_petsc = self._m_pipe_check
c_u11_petsc = m_petsc.createVecLeft()
# c_u11_petsc.set(0)
m_petsc.mult(self._f1_list[-1], c_u11_petsc)
c_u11 = self.vec_scatter(c_u11_petsc, destroy=True)
c_u21_petsc = m_petsc.createVecLeft()
# c_u21_petsc.set(0)
m_petsc.mult(self._f2_list[-1], c_u21_petsc)
c_u21 = self.vec_scatter(c_u21_petsc, destroy=True)
c_u31_petsc = m_petsc.createVecLeft()
# c_u31_petsc.set(0)
m_petsc.mult(self._f3_list[-1], c_u31_petsc)
c_u31 = self.vec_scatter(c_u31_petsc, destroy=True)
err1 = np.sqrt(np.sum((a_u11 - c_u11) ** 2) / np.sum(a_u11 ** 2))
err2 = np.sqrt(np.sum((a_u21 - c_u21) ** 2) / np.sum(a_u21 ** 2))
err3 = np.sqrt(np.sum((a_u31 - c_u31) ** 2) / np.sum(a_u31 ** 2))
PETSc.Sys().Print(' relative err: %f, %f, %f' % (err1, err2, err3))
self._err_list.append((err1, err2, err3))
f1 = self.vec_scatter(self._f1_list[-1], destroy=False)
f2 = self.vec_scatter(self._f2_list[-1], destroy=False)
f3 = self.vec_scatter(self._f3_list[-1], destroy=False)
if rank == 0:
savemat('%s_%s_b%.5f_u.mat' % (fileHandle, outputHandle, b),
{'u11_num': a_u11,
'u21_num': a_u21,
'u31_num': a_u31,
'u11_ana': c_u11,
'u21_ana': c_u21,
'u31_ana': c_u31,
'nodes': cpgeo.get_nodes(),
'kwargs': self.get_kwargs(),
'fnodes': fpgeo.get_nodes(),
'f1': f1,
'f2': f2,
'f3': f3, },
oned_as='column')
t_filename = '%s_%s_b%.5f_u' % (fileHandle, outputHandle, b)
a_u11 = np.asfortranarray(a_u11.reshape(-1, 3))
a_u21 = np.asfortranarray(a_u21.reshape(-1, 3))
a_u31 = np.asfortranarray(a_u31.reshape(-1, 3))
c_u11 = np.asfortranarray(c_u11.reshape(-1, 3))
c_u21 = np.asfortranarray(c_u21.reshape(-1, 3))
c_u31 = np.asfortranarray(c_u31.reshape(-1, 3))
e_u11 = a_u11 - c_u11
e_u21 = a_u21 - c_u21
e_u31 = a_u31 - c_u31
pointsToVTK(t_filename, cpgeo.get_nodes()[:, 0], cpgeo.get_nodes()[:, 1],
cpgeo.get_nodes()[:, 2],
data={"velocity_ana1": (a_u11[:, 0], a_u11[:, 1], a_u11[:, 2]),
"velocity_ana2": (a_u21[:, 0], a_u21[:, 1], a_u21[:, 2]),
"velocity_ana3": (a_u31[:, 0], a_u31[:, 1], a_u31[:, 2]),
"velocity_num1": (c_u11[:, 0], c_u11[:, 1], c_u11[:, 2]),
"velocity_num2": (c_u21[:, 0], c_u21[:, 1], c_u21[:, 2]),
"velocity_num3": (c_u31[:, 0], c_u31[:, 1], c_u31[:, 2]),
"velocity_err1": (e_u11[:, 0], e_u11[:, 1], e_u11[:, 2]),
"velocity_err2": (e_u21[:, 0], e_u21[:, 1], e_u21[:, 2]),
"velocity_err3": (e_u31[:, 0], e_u31[:, 1], e_u31[:, 2]), })
t_filename = '%s_%s_b%.5f_force' % (fileHandle, outputHandle, b)
f1 = np.asfortranarray(f1.reshape(-1, 3))
f2 = np.asfortranarray(f2.reshape(-1, 3))
f3 = np.asfortranarray(f3.reshape(-1, 3))
pointsToVTK(t_filename, fpgeo.get_nodes()[:, 0], fpgeo.get_nodes()[:, 1],
fpgeo.get_nodes()[:, 2],
data={"force1": (f1[:, 0], f1[:, 1], f1[:, 2]),
"force2": (f2[:, 0], f2[:, 1], f2[:, 2]),
"force3": (f3[:, 0], f3[:, 1], f3[:, 2]), })
# t_filename = '%s_%s_b%.5f_velocity' % (fileHandle, outputHandle, b)
return True
def set_prepare(self, fileHandle, fullpath=False):
fileHandle = check_file_extension(fileHandle, '_force_pipe.mat')
if fullpath:
mat_contents = loadmat(fileHandle)
else:
t_path = os.path.dirname(os.path.abspath(__file__))
full_path = os.path.normpath(t_path + '/' + fileHandle)
mat_contents = loadmat(full_path)
self.set_b_list(mat_contents['b'].flatten())
self._f1_list = [f1 for f1 in mat_contents['f1_list']]
self._f2_list = [f2 for f2 in mat_contents['f2_list']]
self._f3_list = [f3 for f3 in mat_contents['f3_list']]
self._residualNorm_list = mat_contents['residualNorm'].tolist()
self._err_list = mat_contents['err'].tolist()
# self._dp = mat_contents['dp'][0, 0]
# self._rp = mat_contents['rp'][0, 0]
# self._lp = mat_contents['lp'][0, 0]
# self._ep = mat_contents['ep'][0, 0]
# self._th = mat_contents['th'][0, 0]
# self._with_cover = mat_contents['with_cover'][0, 0]
# self._stokesletsInPipe_pipeFactor = mat_contents['stokesletsInPipe_pipeFactor'][0, 0]
kwargs = self.get_kwargs()
# kwargs['dp'] = self._dp
# kwargs['rp'] = self._rp
# kwargs['lp'] = self._lp
# kwargs['ep'] = self._ep
# kwargs['th'] = self._th
# kwargs['with_cover'] = self._with_cover
# kwargs['stokesletsInPipe_pipeFactor'] = self._stokesletsInPipe_pipeFactor
self._kwargs['unpickedPrb'] = True
self._kwargs = kwargs
self._pipe_geo_load(mat_contents)
# PETSC version
self._f_list_numpy2PETSC()
return True
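# Restart sketch: solve_prepare() below computes the per-b force lists (and they are stored
# elsewhere as '<fileHandle>_force_pipe.mat'); set_prepare(fileHandle) reloads them so the
# expensive pipe pre-solve can be skipped.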
def solve_prepare(self):
from src.stokesletsInPipe import detail
kwargs = self.get_kwargs()
self._dp = kwargs['dp']
self._rp = kwargs['rp']
self._lp = kwargs['lp']
self._ep = kwargs['ep']
self._th = kwargs['th']
self._with_cover = kwargs['with_cover']
self._stokesletsInPipe_pipeFactor = kwargs['stokesletsInPipe_pipeFactor']
self._b_list = np.linspace(kwargs['b0'], kwargs['b1'],
kwargs['nb']) # list of b (force location).
PETSc.Sys.Print(' b_list: ')
PETSc.Sys.Print(self.get_b_list())
self._f1_list.clear()
self._f2_list.clear()
self._f3_list.clear()
self._pipe_geo_generate(**kwargs)
self._solve_m_pipe(**kwargs)
ini_guess = (None, None, None,)
for i0, b in enumerate(self.get_b_list()):
greenFun = detail(threshold=self._th, b=b)
greenFun.solve_prepare()
waitBar = np.array((i0 + 1, self.get_n_b()))
problem_u1, problem_u2, problem_u3 = self._solve_f_pipe(b, ini_guess, greenFun, waitBar,
**kwargs)
# # numpy based version
# self._f1_list.append(self.vec_scatter(problem_u1.get_force_petsc()))
# self._f2_list.append(self.vec_scatter(problem_u2.get_force_petsc()))
# self._f3_list.append(self.vec_scatter(problem_u3.get_force_petsc()))
# PETSC based version
self._f1_list.append(problem_u1.get_force_petsc())
self._f2_list.append(problem_u2.get_force_petsc())
self._f3_list.append(problem_u3.get_force_petsc())
self._residualNorm_list.append(
(problem_u1.get_residualNorm(), problem_u2.get_residualNorm(),
problem_u3.get_residualNorm()))
if kwargs['check_acc']:
self._check_f_accuracy(b, greenFun, waitBar, **kwargs)
self._m_pipe.destroy()
self._m_pipe_check.destroy()
return True
def get_f_list(self):
# PETSC version
self._f_list_PETSC2numpy()
return self._f1_list, self._f2_list, self._f3_list
def _pipe_geo_generate(self, **kwargs):
dp = self._dp
rp = self._rp
lp = self._lp
ep = self._ep
with_cover = self._with_cover
stokesletsInPipe_pipeFactor = self._stokesletsInPipe_pipeFactor
vpgeo = tunnel_geo() # velocity node geo of pipe
dth = 2 * np.arcsin(dp / 2 / rp)
# debug
# OptDB = PETSc.Options()
# stokesletsInPipe_pipeFactor = OptDB.getReal('dbg_factor', 2.5)
# PETSc.Sys.Print('--------------------> DBG: stokesletsInPipe_pipeFactor=%f' % stokesletsInPipe_pipeFactor)
fpgeo = vpgeo.create_deltatheta(dth=dth, radius=rp, length=lp, epsilon=ep,
with_cover=with_cover,
factor=stokesletsInPipe_pipeFactor)
t_pkg = PETSc.DMComposite().create(comm=PETSc.COMM_WORLD)
t_pkg.addDM(fpgeo.get_dmda())
t_pkg.setFromOptions()
t_pkg.setUp()
t_isglb = t_pkg.getGlobalISs()
fpgeo.set_glbIdx(t_isglb[0].getIndices())
# cbd_geo = geo()
# cbd_geo.combine(geo_list=[vpgeo, fpgeo, ])
# cbd_geo.show_nodes(linestyle='-')
self._fpgeo = fpgeo
self._vpgeo = vpgeo
if self._kwargs['plot_geo']:
fpgeo.show_nodes(linestyle='-')
vpgeo.show_nodes(linestyle='-')
if kwargs['check_acc']:
cpgeo = tunnel_geo()
# a simple method to control the # of nodes on the pipe boundary
tmp_fun = lambda dth: cpgeo.create_deltatheta(dth=dth, radius=rp, length=lp, epsilon=0,
with_cover=2,
factor=1).get_n_nodes()
dth1 = 0.1 # guess 1
dth2 = 0.01 # guess 2
dth_min = dth2 # lower bound on dth (memory limit)
tnode = 7000 # expected number of nodes
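# secant-like iteration on dth: adjust the angular step until the generated check geometry
# has roughly tnode nodes (within 10%), while never letting dth fall below dth_min.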
for _ in np.arange(10):
nnode1 = tmp_fun(dth1)
nnode2 = tmp_fun(dth2)
if np.abs(nnode2 - tnode) < tnode * 0.1:
break
tdth = (tnode - nnode1) * (dth2 - dth1) / (nnode2 - nnode1) + dth1
dth1 = dth2
dth2 = np.max((tdth, (dth_min + dth1) / 2))
cpgeo = tunnel_geo()
cpgeo.create_deltatheta(dth=dth2, radius=rp, length=lp, epsilon=0, with_cover=2,
factor=1)
self._cpgeo = cpgeo
if self._kwargs['plot_geo']:
cpgeo.show_nodes(linestyle='-')
# if kwargs['plot_geo']:
# temp_geo = geoComposit()
# temp_geo.append(vpgeo)
# temp_geo.append(fpgeo)
# temp_geo.show_nodes(linestyle='-')
return True
def _pipe_geo_load(self, mat_contents):
vpgeo = base_geo()
vpgeo.set_nodes(mat_contents['vp_nodes'], deltalength=0)
fpgeo = base_geo()
fpgeo.set_nodes(mat_contents['fp_nodes'], deltalength=0)
t_pkg = PETSc.DMComposite().create(comm=PETSc.COMM_WORLD)
t_pkg.addDM(fpgeo.get_dmda())
t_pkg.setFromOptions()
t_pkg.setUp()
t_isglb = t_pkg.getGlobalISs()
fpgeo.set_glbIdx(t_isglb[0].getIndices())
t_pkg = PETSc.DMComposite().create(comm=PETSc.COMM_WORLD)
t_pkg.addDM(vpgeo.get_dmda())
t_pkg.setFromOptions()
t_pkg.setUp()
t_isglb = t_pkg.getGlobalISs()
vpgeo.set_glbIdx(t_isglb[0].getIndices())
self._fpgeo = fpgeo
self._vpgeo = vpgeo
# self._cpgeo = None
return True
def _solve_m_pipe(self, **kwargs):
# generate the geometry and associated nodes: a finite-length pipe with covers at both ends.
t0 = time()
obj1 = StokesFlowObj()
obj1.set_data(self._fpgeo, self._vpgeo)
PETSc.Sys().Print(
'Stokeslets in pipe prepare, containing %d nodes' % self._vpgeo.get_n_nodes())
self._m_pipe = self.create_obj_matrix(obj1, obj1, copy_obj=False, **kwargs)
if kwargs['check_acc']:
obj2 = StokesFlowObj()
obj2.set_data(self._fpgeo, self._cpgeo)
PETSc.Sys().Print('Stokeslets in pipe check, containing %d nodes' %
self._cpgeo.get_n_nodes())
self._m_pipe_check = self.create_obj_matrix(obj2, obj2, copy_obj=False, **kwargs)
t1 = time()
PETSc.Sys().Print(' create matrix use: %fs' % (t1 - t0))
return True
def _solve_f_pipe(self, b, ini_guess, greenFun, waitBar=np.array((1, 1)), **kwargs):
# calculate the force at each node at (or outside) the pipe boundary.
vpgeo = self._vpgeo
outputHandle = 'vpgeo'
u11, u21, u31 = self._solve_u1_pipe(vpgeo, outputHandle, greenFun, waitBar)
# for each direction, solve force at (or outside) nodes.
fpgeo = self._fpgeo
kwargs_u1 = kwargs.copy()
kwargs_u1['deltaLength'] = self._dp
kwargs_u1['epsilon'] = self._ep
kwargs_u1['delta'] = self._dp * self._ep
kwargs_u1['name'] = ' _%05d/%05d_u1' % (waitBar[0], waitBar[1])
kwargs_u1['plot'] = False
kwargs_u1['fileHandle'] = 'stokesletsInPipeProblem_u1'
kwargs_u1['restart'] = False
kwargs_u1['getConvergenceHistory'] = False
kwargs_u1['pickProblem'] = False
problem_u1 = StokesFlowProblem(**kwargs_u1)
obj_u1 = StokesFlowObj()
obj_u1_kwargs = {'name': 'stokesletsInPipeObj_u1'}
vpgeo.set_velocity(u11)
obj_u1.set_data(fpgeo, vpgeo, **obj_u1_kwargs)
problem_u1.add_obj(obj_u1)
problem_u1.set_matrix(self._m_pipe)
problem_u1.solve(ini_guess=ini_guess[0])
kwargs_u2 = kwargs_u1.copy()
kwargs_u2['name'] = ' _%05d/%05d_u2' % (waitBar[0], waitBar[1])
kwargs_u2['fileHandle'] = 'stokesletsInPipeProblem_u2'
problem_u2 = StokesFlowProblem(**kwargs_u2)
obj_u2 = StokesFlowObj()
obj_u2_kwargs = {'name': 'stokesletsInPipeObj_u2'}
vpgeo.set_velocity(u21)
obj_u2.set_data(fpgeo, vpgeo, **obj_u2_kwargs)
problem_u2.add_obj(obj_u2)
problem_u2.set_matrix(self._m_pipe)
problem_u2.solve(ini_guess=ini_guess[1])
kwargs_u3 = kwargs_u1.copy()
kwargs_u3['name'] = ' _%05d/%05d_u3' % (waitBar[0], waitBar[1])
kwargs_u3['fileHandle'] = 'stokesletsInPipeProblem_u3'
problem_u3 = StokesFlowProblem(**kwargs_u3)
obj_u3 = StokesFlowObj()
obj_u3_kwargs = {'name': 'stokesletsInPipeObj_u3'}
vpgeo.set_velocity(u31)
obj_u3.set_data(fpgeo, vpgeo, **obj_u3_kwargs)
problem_u3.add_obj(obj_u3)
problem_u3.set_matrix(self._m_pipe)
problem_u3.solve(ini_guess=ini_guess[2])
return problem_u1, problem_u2, problem_u3
def _solve_u1_pipe(self, pgeo, outputHandle, greenFun, waitBar=np.array((1, 1))):
t0 = time()
from src.StokesFlowMethod import stokeslets_matrix_3d
# 1 velocity at pipe
iscover = pgeo.get_iscover()
uR1 = np.zeros(np.sum(~iscover))
uPhi1 = np.zeros_like(uR1)
uz1 = np.zeros_like(uR1)
uR2 = np.zeros_like(uR1)
uPhi2 = np.zeros_like(uR1)
uz2 = np.zeros_like(uR1)
uR3 = np.zeros_like(uR1)
uPhi3 = np.zeros_like(uR1)
uz3 = np.zeros_like(uR1)
# 2 velocity at cover
# see Liron, N., and R. Shahar. "Stokes flow due to a Stokeslet in a pipe."
# Journal of Fluid Mechanics 86.04 (1978): 727-744.
tuR1_list = []
tuPhi1_list = []
tuz1_list = []
tuR2_list = []
tuPhi2_list = []
tuz2_list = []
tuR3_list = []
tuPhi3_list = []
tuz3_list = []
cover_start_list = pgeo.get_cover_start_list()
n_cover_node = 0
for t_nodes in cover_start_list:
tR = t_nodes[0]
tphi = t_nodes[1]
tz = np.abs(t_nodes[2])
sign_z = np.sign(t_nodes[2])
n_cover_node = n_cover_node + tphi.size
tuR1, tuPhi1, tuz1, tuR2, tuPhi2, tuz2, tuR3, tuPhi3, tuz3 = greenFun.solve_u(tR, tphi,
tz)
tuR1_list.append(tuR1)
tuPhi1_list.append(tuPhi1)
tuz1_list.append(sign_z * tuz1)
tuR2_list.append(tuR2)
tuPhi2_list.append(tuPhi2)
tuz2_list.append(sign_z * tuz2)
tuR3_list.append(sign_z * tuR3)
tuPhi3_list.append(sign_z * tuPhi3)
tuz3_list.append(tuz3)
uR1 = np.hstack((np.hstack(tuR1_list), uR1))
uPhi1 = np.hstack((np.hstack(tuPhi1_list), uPhi1))
uz1 = np.hstack((np.hstack(tuz1_list), uz1))
uR2 = np.hstack((np.hstack(tuR2_list), uR2))
uPhi2 = np.hstack((np.hstack(tuPhi2_list), uPhi2))
uz2 = np.hstack((np.hstack(tuz2_list), uz2))
uR3 = np.hstack((np.hstack(tuR3_list), uR3))
uPhi3 = np.hstack((np.hstack(tuPhi3_list), uPhi3))
uz3 = np.hstack((np.hstack(tuz3_list), uz3))
tuR1_list = []
tuPhi1_list = []
tuz1_list = []
tuR2_list = []
tuPhi2_list = []
tuz2_list = []
tuR3_list = []
tuPhi3_list = []
tuz3_list = []
cover_end_list = pgeo.get_cover_end_list()
for t_nodes in cover_end_list:
tR = t_nodes[0]
tphi = t_nodes[1]
tz = np.abs(t_nodes[2])
sign_z = np.sign(t_nodes[2])
n_cover_node = n_cover_node + tphi.size
tuR1, tuPhi1, tuz1, tuR2, tuPhi2, tuz2, tuR3, tuPhi3, tuz3 = greenFun.solve_u(tR, tphi,
tz)
tuR1_list.append(tuR1)
tuPhi1_list.append(tuPhi1)
tuz1_list.append(sign_z * tuz1)
tuR2_list.append(tuR2)
tuPhi2_list.append(tuPhi2)
tuz2_list.append(sign_z * tuz2)
tuR3_list.append(sign_z * tuR3)
tuPhi3_list.append(sign_z * tuPhi3)
tuz3_list.append(tuz3)
uR1 = np.hstack((uR1, np.hstack(tuR1_list)))
uPhi1 = np.hstack((uPhi1, np.hstack(tuPhi1_list)))
uz1 = np.hstack((uz1, np.hstack(tuz1_list)))
uR2 = np.hstack((uR2, np.hstack(tuR2_list)))
uPhi2 = np.hstack((uPhi2, np.hstack(tuPhi2_list)))
uz2 = np.hstack((uz2, np.hstack(tuz2_list)))
uR3 = np.hstack((uR3, np.hstack(tuR3_list)))
uPhi3 = np.hstack((uPhi3, np.hstack(tuPhi3_list)))
uz3 = np.hstack((uz3, np.hstack(tuz3_list)))
assert n_cover_node == np.sum(iscover), 'number of cover nodes does not match iscover. '
pphi, _, _ = pgeo.get_polar_coord()
ux1 = np.cos(pphi) * uR1 - np.sin(pphi) * uPhi1
ux2 = np.cos(pphi) * uR2 - np.sin(pphi) * uPhi2
ux3 = np.cos(pphi) * uR3 - np.sin(pphi) * uPhi3
uy1 = np.sin(pphi) * uR1 + np.cos(pphi) * uPhi1
uy2 = np.sin(pphi) * uR2 + np.cos(pphi) * uPhi2
uy3 = np.sin(pphi) * uR3 + np.cos(pphi) * uPhi3
u1 = np.vstack((ux1, uy1, uz1)).T
u2 = np.vstack((ux2, uy2, uz2)).T
u3 = np.vstack((ux3, uy3, uz3)).T
# u2, stokeslets, singularity.
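# Subtract the free-space stokeslet contribution (the singular part, evaluated via
# stokeslets_matrix_3d for a point force at (b, 0, 0)) from the pipe Green's
# function velocities u1/u2/u3, leaving the regular parts u11/u21/u31 that are
# returned as boundary data.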
b = greenFun.get_b()
stokeslets_post = np.hstack((b, 0, 0)).reshape(1, 3)
geo_stokeslets = base_geo()
geo_stokeslets.set_nodes(stokeslets_post, deltalength=0, resetVelocity=True)
obj_stokeslets = StokesFlowObj()
obj_stokeslets.set_data(geo_stokeslets, geo_stokeslets)
obj_p = StokesFlowObj()
obj_p.set_data(pgeo, pgeo)
m2 = stokeslets_matrix_3d(obj_p, obj_stokeslets)
f12 = np.array((1, 0, 0))
f22 = np.array((0, 1, 0))
f32 = np.array((0, 0, 1))
u12 = np.dot(m2, f12)
u22 = np.dot(m2, f22)
u32 = np.dot(m2, f32)
u11 = u1.flatten() - u12
u21 = u2.flatten() - u22
u31 = u3.flatten() - u32
t1 = time()
PETSc.Sys().Print(' _%05d/%05d_b=%f: calculate %s boundary condition use: %fs' % (
waitBar[0], waitBar[1], b, outputHandle, t1 - t0))
# debug
# length_factor = 0.3
# pgeo.set_velocity(u1)
# pgeo.show_velocity(length_factor=length_factor, show_nodes=False)
# pgeo.set_velocity(u2)
# pgeo.show_velocity(length_factor=length_factor, show_nodes=False)
# pgeo.set_velocity(u3)
# pgeo.show_velocity(length_factor=length_factor, show_nodes=False)
return u11, u21, u31
def _set_temp_var(self, obj1):
# create an empty matrix and empty velocity vectors in advance to avoid repeated memory allocation.
comm = PETSc.COMM_WORLD.tompi4py()
ugeo = obj1.get_u_geo().copy()
kwargs = self.get_kwargs()
t_u_pkg = PETSc.DMComposite().create(comm=PETSc.COMM_WORLD)
t_u_pkg.addDM(ugeo.get_dmda())
t_u_pkg.setFromOptions()
t_u_pkg.setUp()
self._t_u11 = [t_u_pkg.createGlobalVector(), t_u_pkg.createGlobalVector(),
t_u_pkg.createGlobalVector()]
self._t_u12 = [t_u_pkg.createGlobalVector(), t_u_pkg.createGlobalVector(),
t_u_pkg.createGlobalVector()]
self._t_u13 = [t_u_pkg.createGlobalVector(), t_u_pkg.createGlobalVector(),
t_u_pkg.createGlobalVector()]
self._t_u2 = t_u_pkg.createGlobalVector()
stokeslet_m = PETSc.Mat().create(comm=PETSc.COMM_WORLD)
stokeslet_m.setSizes((self._t_u11[0].getSizes(), self._f123_petsc[0].getSizes()))
stokeslet_m.setType('dense')
stokeslet_m.setFromOptions()
stokeslet_m.setUp()
self._stokeslet_m = stokeslet_m
u_isglb = t_u_pkg.getGlobalISs()
u_glbIdx = np.hstack(comm.allgather(u_isglb[0].getIndices()))
temp_m = PETSc.Mat().create(comm=PETSc.COMM_WORLD)
temp_m.setSizes((self._t_u11[0].getSizes(), self._f1_list[0].getSizes()))
temp_m.setType('dense')
temp_m.setFromOptions()
temp_m.setUp()
self._t_m = temp_m
t_u_pkg.destroy()
return u_glbIdx
def _create_matrix_obj(self, obj1, m, INDEX='', *args):
# set stokeslets using numerical solution.
t_u_glbIdx_all = self._set_temp_var(
obj1)  # global indices for the stokeslet sub-problem vectors; may differ from the indices of the m matrix.
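# Assemble the matrix column by column: for every force node of every object,
# solve the three stokeslet sub-problems at that node and write the resulting
# velocity responses into the three corresponding columns of m.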
_, u_glbIdx_all = obj1.get_u_geo().get_glbIdx()
unodes = obj1.get_u_nodes()
n_obj = len(self.get_all_obj_list())
for i0, obj2 in enumerate(self.get_all_obj_list()):
f_nodes = obj2.get_f_nodes()
_, f_glbIdx_all = obj2.get_f_geo().get_glbIdx()
# f_dmda = obj2.get_f_geo().get_dmda()
f_desc = INDEX + ' %d/%d, ' % (i0 + 1, n_obj)
for i0 in tqdm(range(obj2.get_n_f_node()), desc=f_desc, leave=False):
# for i0 in range(obj2.get_n_f_node( )):
t_f_node = f_nodes[i0]
f_glb = f_glbIdx_all[i0 * 3]
u1, u2, u3 = self._solve_stokeslets_fnode(t_f_node, unodes, t_u_glbIdx_all)
u_range = u1.getOwnershipRange()
u_glbIdx = u_glbIdx_all[u_range[0]:u_range[1]]
m.setValues(u_glbIdx, f_glb + 0, u1, addv=False)
m.setValues(u_glbIdx, f_glb + 1, u2, addv=False)
m.setValues(u_glbIdx, f_glb + 2, u3, addv=False)
m.assemble()
return True
def _f_list_numpy2PETSC(self):
t_f1_list = uniqueList()  # list of force vectors, one per object at or outside the pipe, associated with force nodes along the x axis
t_f2_list = uniqueList()  # list of force vectors, one per object at or outside the pipe, associated with force nodes along the y axis
t_f3_list = uniqueList()  # list of force vectors, one per object at or outside the pipe, associated with force nodes along the z axis
f_pkg = PETSc.DMComposite().create(comm=PETSc.COMM_WORLD)
f_pkg.addDM(self._fpgeo.get_dmda())
f_pkg.setFromOptions()
f_pkg.setUp()
for f1 in self._f1_list:
f1_petsc = f_pkg.createGlobalVector()
f1_petsc.setFromOptions()
f1_petsc.setUp()
f1_petsc[:] = f1[:]
f1_petsc.assemble()
t_f1_list.append(f1_petsc)
for f2 in self._f2_list:
f2_petsc = f_pkg.createGlobalVector()
f2_petsc.setFromOptions()
f2_petsc.setUp()
f2_petsc[:] = f2[:]
f2_petsc.assemble()
t_f2_list.append(f2_petsc)
for f3 in self._f3_list:
f3_petsc = f_pkg.createGlobalVector()
f3_petsc.setFromOptions()
f3_petsc.setUp()
f3_petsc[:] = f3[:]
f3_petsc.assemble()
t_f3_list.append(f3_petsc)
self._f1_list = t_f1_list
self._f2_list = t_f2_list
self._f3_list = t_f3_list
f_pkg.destroy()
return True
def _f_list_PETSC2numpy(self):
t_f1_list = []
t_f2_list = []
t_f3_list = []
# t_f1_list = uniqueList() # list of forces lists for each object at or outside pipe associated with force-nodes at x axis
# t_f2_list = uniqueList() # list of forces lists for each object at or outside pipe associated with force-nodes at y axis
# t_f3_list = uniqueList() # list of forces lists for each object at or outside pipe associated with force-nodes at z axis
for f1_petsc in self._f1_list: # each obj
f1 = self.vec_scatter(f1_petsc)
t_f1_list.append(f1)
for f2_petsc in self._f2_list: # each obj
f2 = self.vec_scatter(f2_petsc)
t_f2_list.append(f2)
for f3_petsc in self._f3_list: # each obj
f3 = self.vec_scatter(f3_petsc)
t_f3_list.append(f3)
self._f1_list = t_f1_list
self._f2_list = t_f2_list
self._f3_list = t_f3_list
return True
def pickmyself_prepare(self):
super().pickmyself_prepare()
self._f_list_PETSC2numpy()
return True
def destroy(self):
super().destroy()
t1 = (self._m_pipe, self._m_pipe_check, self._t_m, self._stokeslet_m, self._t_u2)
for ti in itertools.chain(self._t_u11, self._t_u12, self._t_u13,
self._f123_petsc, t1):
if not ti is None:
ti.destroy()
self._m_pipe = None
self._m_pipe_check = None
self._t_m = None
self._stokeslet_m = None
self._t_u2 = None
self._t_u11 = [[] for _ in self._t_u11]
self._t_u12 = [[] for _ in self._t_u12]
self._t_u13 = [[] for _ in self._t_u13]
self._f123_petsc = [[] for _ in self._f123_petsc]
self._cpgeo.pickmyself_prepare()
self._fpgeo.pickmyself_prepare()
self._vpgeo.pickmyself_prepare()
self._stokeslet_geo.pickmyself_prepare()
return True
def unpick_myself(self, check_MPISIZE=True):
super().unpick_myself(check_MPISIZE=check_MPISIZE)
fileHandle = self.get_kwargs()['forcepipe']
t_path = os.path.dirname(os.path.abspath(__file__))
full_path = os.path.normpath(t_path + '/' + fileHandle)
mat_contents = loadmat(full_path)
self._pipe_geo_load(mat_contents)
self._kwargs['unpickedPrb'] = True
self._f_list_numpy2PETSC()
# create an empty matrix and empty velocity vectors in advance
# to avoid repeated memory allocation.
self._set_f123()
# this property was renamed (previously self._factor).
if not hasattr(self, '_stokesletsInPipe_pipeFactor'):
self._stokesletsInPipe_pipeFactor = self._factor
return True
class StokesletsRingProblem(StokesFlowProblem):
# using the symmetry of the head, nodes are distributed along the line (r, 0, z).
def check_nodes(self, nodes):
err_msg = 'nodes must be distributed along the line (r, 0, z). '
assert np.allclose(nodes[:, 1], 0), err_msg
return True
def add_obj(self, obj):
assert isinstance(obj, StokesFlowRingObj)
super().add_obj(obj)
self.check_nodes(obj.get_u_nodes())
self.check_nodes(obj.get_f_nodes())
class StokesletsRingInPipeProblem(StokesletsRingProblem):
# using the symmetry of the pipe and the head, nodes are distributed along the line (r, 0, z).
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._th = kwargs['stokeslets_threshold']
if kwargs['use_tqdm_notebook']:
self._tqdm = tqdm_notebook
else:
self._tqdm = tqdm
def check_nodes(self, nodes):
super().check_nodes(nodes)
err_msg = 'r=%f out of the range [0, 1). ' % np.max(nodes[:, 0])
assert np.max(nodes[:, 0]) < 1, err_msg
err_msg = 'r=%f out of the range [0, 1). ' % np.min(nodes[:, 0])
assert np.min(nodes[:, 0]) >= 0, err_msg
return True
def _create_matrix_obj(self, obj1, m, INDEX='', *args):
# set stokeslets using theoretical solution from Liron1977
from src.stokesletsInPipe import StokesletsRinginPipe_light
_, u_glbIdx_all = obj1.get_u_geo().get_glbIdx()
u_nodes = obj1.get_u_nodes()
n_obj = len(self.get_all_obj_list())
green_fun = StokesletsRinginPipe_light(threshold=self._th)
green_fun.solve_prepare_light()
b_use = -np.inf
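# b_use caches the ring radius of the previous force node; solve_prepare_b is
# only re-evaluated when the radius changes, which avoids redundant work when
# consecutive force nodes share the same radius.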
for i00, obj2 in enumerate(self.get_all_obj_list()):
use_matrix_method = 'pf_stokesletsRingInPipe'
err_msg = 'keyword \'matrix_method\' should be \'%s\' ' % use_matrix_method
assert obj2.get_matrix_method() == use_matrix_method, err_msg
f_nodes = obj2.get_f_nodes()
f_dmda_range = obj2.get_f_geo().get_dmda().getRanges()[0]
_, f_glbIdx_all = obj2.get_f_geo().get_glbIdx()
f_desc = INDEX + ' %d/%d, ' % (i00 + 1, n_obj)
for i0 in self._tqdm(range(f_dmda_range[0], f_dmda_range[1]), desc=f_desc, leave=True):
t_f_node = f_nodes[i0]
tb = t_f_node[0]
if not np.isclose(tb, b_use):
green_fun.set_b(tb)
green_fun.solve_prepare_b()
b_use = tb
f_glb = f_glbIdx_all[i0 * 3]
for i1, t_u_node in enumerate(u_nodes):
tru = t_u_node[0]
u_glb = u_glbIdx_all[i1 * 3]
t_z = t_u_node[2] - t_f_node[2]
abs_z = np.abs(t_z)
sign_z = np.sign(t_z)
tm = np.array(green_fun.solve_u_light(tru, abs_z)).reshape(3, 3).T
tsign = np.array(((1, 1, sign_z), (1, 1, sign_z), (sign_z, sign_z, 1)))
rows = (u_glb + 0, u_glb + 1, u_glb + 2)
cols = (f_glb + 0, f_glb + 1, f_glb + 2)
m.setValues(rows, cols, tsign * tm * tb, addv=False)
m.assemble()
return True
class StokesletsRingInPipeProblemSymz(StokesletsRingInPipeProblem):
# assume an additional symmetry in the z direction
def check_nodes(self, nodes):
super().check_nodes(nodes)
err_msg = 'additional symmetry along z requires z >= 0. '
assert np.all(nodes[:, 2] >= 0), err_msg
return True
def _create_matrix_obj(self, obj1, m, INDEX='', *args):
# set stokeslets using theoretical solution from Liron1977
from src.stokesletsInPipe import StokesletsRinginPipe_light
_, u_glbIdx_all = obj1.get_u_geo().get_glbIdx()
u_nodes = obj1.get_u_nodes()
n_obj = len(self.get_all_obj_list())
green_fun = StokesletsRinginPipe_light(threshold=self._th)
green_fun.solve_prepare_light()
b_use = -np.inf
for i00, obj2 in enumerate(self.get_all_obj_list()):
use_matrix_method = 'pf_stokesletsRingInPipeProblemSymz'
err_msg = 'keyword \'matrix_method\' should be \'%s\' ' % use_matrix_method
assert obj2.get_matrix_method() == use_matrix_method, err_msg
f_nodes = obj2.get_f_nodes()
f_dmda_range = obj2.get_f_geo().get_dmda().getRanges()[0]
_, f_glbIdx_all = obj2.get_f_geo().get_glbIdx()
f_desc = INDEX + ' %d/%d, ' % (i00 + 1, n_obj)
for i0 in self._tqdm(range(f_dmda_range[0], f_dmda_range[1]), desc=f_desc, leave=True):
t_f_node = f_nodes[i0]
tb = t_f_node[0]
if not np.isclose(tb, b_use):
green_fun.set_b(tb)
green_fun.solve_prepare_b()
b_use = tb
f_glb = f_glbIdx_all[i0 * 3]
for i1, t_u_node in enumerate(u_nodes):
u_glb = u_glbIdx_all[i1 * 3]
tru = t_u_node[0]
# part 1 of force node, z >= 0
t_z = t_u_node[2] - t_f_node[2]
abs_z = np.abs(t_z)
sign_z = np.sign(t_z)
tm1 = np.array(green_fun.solve_u_light(tru, abs_z)).reshape(3, 3).T
tsign1 = np.array(((1, 1, sign_z), (1, 1, sign_z), (sign_z, sign_z, 1)))
# symmetric part of force node, z <= 0,
# thus sign_z == 1, fR'=-fR, fphi'=fphi, fz'=fz.
t_z = t_u_node[2] + t_f_node[2]
tm2 = np.array(green_fun.solve_u_light(tru, t_z)).reshape(3, 3).T
tsign2 = np.array(((-1, 1, 1), (-1, 1, 1), (-1, 1, 1)))
tm = (tsign1 * tm1 + tsign2 * tm2) * tb
rows = (u_glb + 0, u_glb + 1, u_glb + 2)
cols = (f_glb + 0, f_glb + 1, f_glb + 2)
m.setValues(rows, cols, tm, addv=False)
m.assemble()
return True
class SelfRepeatObj(StokesFlowObj):
def set_data(self, f_geo: SelfRepeat_body_geo, u_geo: base_geo, name='...', **kwargs):
assert isinstance(f_geo, SelfRepeat_body_geo)
super().set_data(f_geo, u_geo, name, **kwargs)
self._type = 'self repeat obj'
return True
def get_total_force(self, center=None):
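# The object stores a single repeating unit of the geometry; the total force of
# the full body is (presumably) repeat_n times the force on that unit.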
repeat_n = self.get_f_geo().repeat_n
f_t = super().get_total_force(center=center)
return f_t * repeat_n
class SelfRepeatHlxProblem(StokesFlowProblem):
# Todo: check the directions of the geometry and the rigid body velocity.
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._full_obj_list = []
@property
def full_obj_list(self):
return self._full_obj_list
def add_obj(self, obj_pair):
part_obj, full_obj = obj_pair
assert isinstance(part_obj, SelfRepeatObj)
ugeo = full_obj.get_u_geo()
fgeo = full_obj.get_f_geo()
assert isinstance(ugeo, SelfRepeat_FatHelix)
assert isinstance(fgeo, SelfRepeat_FatHelix)
self._full_obj_list.append(full_obj)
return super().add_obj(part_obj)
class SelfRotateObj(StokesFlowObj):
def set_data(self, *args, **kwargs):
super().set_data(*args, **kwargs)
self._type = 'self rotate obj'
return True
def get_total_force(self, center=None):
f_t = super().get_total_force(center=center)
f = f_t[:3]
t = f_t[3:]
problem_n_copy = self.get_problem().get_kwargs()['problem_n_copy']
problem_norm = self.get_problem().get_kwargs()['problem_norm']
F, T = 0, 0
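# Sum the force and torque over all rotational copies of the object by rotating
# the single-copy result about problem_norm through the equally spaced angles.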
for thetai in np.linspace(0, 2 * np.pi, problem_n_copy, endpoint=False):
rot_M = get_rot_matrix(problem_norm, thetai)
# PETSc.Sys.Print(np.dot(rot_M, f))
F = F + np.dot(rot_M, f)
T = T + np.dot(rot_M, t)
return np.hstack((F, T))
class SelfRotateProblem(StokesFlowProblem):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._problem_center = kwargs['problem_center']
self._problem_norm = kwargs['problem_norm']
self._problem_n_copy = kwargs['problem_n_copy']
def set_rigid_velocity(self, u, w):
problem_norm = self.get_kwargs()['problem_norm']
problem_center = self.get_kwargs()['problem_center']
U = np.hstack((u * problem_norm, w * problem_norm))
for tobj in self.get_obj_list():
tobj.set_rigid_velocity(U, problem_center)
return True
def add_obj(self, obj):
assert isinstance(obj, SelfRotateObj)
return super().add_obj(obj)
def show_all_u_nodes(self, linestyle='-'):
problem_norm = self.get_kwargs()['problem_norm']
problem_n_copy = self.get_kwargs()['problem_n_copy']
problem_center = self.get_kwargs()['problem_center']
geo_list = uniqueList()
for obj1 in self.get_all_obj_list():
tugeo = obj1.get_u_geo()
if isinstance(obj1, SelfRotateObj):
for thetai in np.linspace(0, 2 * np.pi, problem_n_copy, endpoint=False):
tugeo2 = tugeo.copy()
tugeo2.node_rotation(problem_norm, thetai, problem_center)
geo_list.append(tugeo2)
else:
geo_list.append(tugeo)
temp_geo = base_geo()
temp_geo.combine(geo_list)
temp_geo.show_nodes(linestyle)
return True
# def show_f_nodes(self, linestyle='-'):
# err_msg='not finish yet'
# assert 1==2, err_msg
#
# def show_f_u_nodes(self, linestyle='-'):
# err_msg='not finish yet'
# assert 1==2, err_msg
#
# def show_force(self, length_factor=1, show_nodes=True):
# err_msg='not finish yet'
# assert 1==2, err_msg
#
# def show_velocity(self, length_factor=1, show_nodes=True):
# err_msg='not finish yet'
# assert 1==2, err_msg
class ForceFreeComposite:
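"""
A rigid assembly of StokesFlowObj instances. The reference rigid-body velocity
(ux, uy, uz, wx, wy, wz) of the assembly is carried as six additional unknowns
(see set_dmda), and the associated rows/columns are assembled in
ForceFreeProblem.set_force_free so that the total force and torque vanish.
"""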
def __init__(self, center: np.array, norm: np.array, name='...', *args):
self._obj_list = uniqueList()
self._rel_U_list = [] # (ux,uy,uz,wx,wy,wz)
self._index = -1 # index of object
self._problem = None
self._center = None
self.set_center(center)
self._norm = None
self.set_norm(norm)
self._psi = 0  # rotation of the composite about the norm axis
self._n_fnode = 0
self._n_unode = 0
self._f_glbIdx = np.array([]) # global indices
self._f_glbIdx_all = np.array([]) # global indices for all process.
self._u_glbIdx = np.array([]) # global indices
self._u_glbIdx_all = np.array([]) # global indices for all process.
self._type = 'ForceFreeComposite' # object type
self._name = name # object name
self._ref_U = np.zeros(6) # ux, uy, uz, omega_x, omega_y, omega_z
# self._sum_force = np.inf * np.ones(6) # [F, T]==0 to satisfy the force free equations.
self._min_ds = np.inf # min deltalength of objects in the composite
self._f_dmda = None
self._u_dmda = None
self.set_dmda()
# the following properties store the location history of the composite.
self._update_fun = Adams_Moulton_Methods
self._update_order = 1
self._locomotion_fct = np.ones(3)
self._center_hist = []
self._norm_hist = []
self._ref_U_hist = [] # (ux,uy,uz,wx,wy,wz)
self._displace_hist = []
self._rotation_hist = []
def __repr__(self):
return self.get_obj_name()
def __str__(self):
t_str = self.get_name() + ': {'
for subobj in self.get_obj_list():
t_str = t_str + subobj.get_name() + '; '
t_str = t_str + '}'
return t_str
def add_obj(self, obj, rel_U):
self._obj_list.append(obj)
obj.set_index(self.get_n_obj())
obj.set_problem(self)
obj.set_rigid_velocity(rel_U, self.get_center())
self._rel_U_list.append(rel_U)
self._n_fnode += obj.get_n_f_node()
self._n_unode += obj.get_n_u_node()
self._min_ds = np.min((self._min_ds, obj.get_u_geo().get_deltaLength()))
return True
def set_rel_U_list(self, rel_U_list):
err_msg = 'wrong rel_U_list shape. '
assert len(self.get_obj_list()) == len(rel_U_list), err_msg
err_msg = 'wrong rel_U shape. '
for sub_obj, rel_U in zip(self.get_obj_list(), rel_U_list):
assert rel_U.size == 6, err_msg
sub_obj.set_rigid_velocity(rel_U, self.get_center())
self._rel_U_list = rel_U_list
return True
def get_f_dmda(self):
return self._f_dmda
def get_u_dmda(self):
return self._u_dmda
def set_dmda(self):
# additional degrees of freedom for force free.
self._f_dmda = PETSc.DMDA().create(sizes=(6,), dof=1, stencil_width=0,
comm=PETSc.COMM_WORLD)
self._f_dmda.setFromOptions()
self._f_dmda.setUp()
self._u_dmda = PETSc.DMDA().create(sizes=(6,), dof=1, stencil_width=0,
comm=PETSc.COMM_WORLD)
self._u_dmda.setFromOptions()
self._u_dmda.setUp()
return True
def destroy_dmda(self):
self._f_dmda.destroy()
self._u_dmda.destroy()
self._f_dmda = None
self._u_dmda = None
return True
def get_n_obj(self):
return len(self._obj_list)
def get_obj_list(self):
return self._obj_list
def get_n_f_node(self):
return self._n_fnode
def get_n_u_node(self):
return self._n_unode
def get_rel_U_list(self):
return self._rel_U_list
def get_center(self):
return self._center
def set_center(self, center):
err_msg = 'center=[x, y, z] has 3 components. '
assert center.size == 3, err_msg
self._center = center
return True
def get_norm(self):
return self._norm
def get_psi(self):
return self._psi
def set_norm(self, norm):
err_msg = 'norm=[x, y, z] has 3 components and ||norm|| > 0. '
assert norm.size == 3 and np.linalg.norm(norm) > 0, err_msg
self._norm = norm / np.linalg.norm(norm)
return True
def get_index(self):
return self._index
def get_min_ds(self):
return self._min_ds
def get_type(self):
return self._type
def get_obj_name(self):
return self._type + ' (index %d)' % self._index
def get_name(self):
return self._name
def set_name(self, name):
self._name = name
return True
def set_index(self, new_index):
self._index = new_index
return True
def set_f_glbIdx(self, indices):
comm = PETSc.COMM_WORLD.tompi4py()
self._f_glbIdx = indices
self._f_glbIdx_all = np.hstack(comm.allgather(indices))
return True
def get_f_glbIdx(self):
return self._f_glbIdx, self._f_glbIdx_all
def set_u_glbIdx(self, indices):
comm = PETSc.COMM_WORLD.tompi4py()
self._u_glbIdx = indices
self._u_glbIdx_all = np.hstack(comm.allgather(indices))
return True
def get_u_glbIdx(self):
return self._u_glbIdx, self._u_glbIdx_all
def get_combined_obj(self):
obj0 = StokesFlowObj()
obj0.combine(self.get_obj_list(), set_re_u=True, set_force=True)
return obj0
def set_problem(self, problem: 'StokesFlowProblem'):
self._problem = problem
return True
def clear_obj_list(self):
self._obj_list = uniqueList()
self._rel_U_list = []
return True
def print_info(self):
PETSc.Sys.Print(' %s: father %s, type %s, index %d, force nodes %d, velocity nodes %d'
% (self.get_name(), self._problem.get_name(), self._type, self.get_index(),
self.get_n_f_node(), self.get_n_u_node()))
for obj in self._obj_list:
obj.print_info()
return True
def copy(self):
composite2 = copy.copy(self)
composite2.set_problem(self._problem)
composite2.set_index(-1)
composite2.set_dmda()
composite2.clear_obj_list()
for sub_obj, rel_U in zip(self.get_obj_list(), self.get_rel_U_list()):
obj1 = sub_obj.copy()
composite2.add_obj(obj1, rel_U)
return composite2
def move(self, displacement):
for subobj in self.get_obj_list():
subobj.move(displacement=displacement)
self._center = self._center + displacement
return True
def node_rotation(self, norm=np.array([0, 0, 1]), theta=np.zeros(1), rotation_origin=None):
rotation_origin = self._center if rotation_origin is None else rotation_origin
for subobj, rel_U in zip(self.get_obj_list(), self.get_rel_U_list()):
subobj.node_rotation(norm=norm, theta=theta, rotation_origin=rotation_origin)
subobj.set_rigid_velocity(rel_U, self.get_center())
rotation = get_rot_matrix(norm, theta)
t_origin = self._center
t_norm = self._norm.copy()
self._center = np.dot(rotation, (self._center - rotation_origin)) + rotation_origin
self._norm = np.dot(rotation, (self._norm + t_origin - rotation_origin)) \
+ rotation_origin - self._center
self._norm = self._norm / np.linalg.norm(self._norm)
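# Rotate the relative velocities of the sub-objects and the reference velocity
# of the composite as well: each 3-vector is rotated by treating it as a point
# offset from the (old) center, mirroring the transformation applied to _norm.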
rel_U_list = []
for rel_U0 in self.get_rel_U_list():
tU = np.dot(rotation, (rel_U0[:3] + t_origin - rotation_origin)) \
+ rotation_origin - self._center
tW = np.dot(rotation, (rel_U0[3:] + t_origin - rotation_origin)) \
+ rotation_origin - self._center
rel_U_list.append(np.hstack((tU, tW)))
self._rel_U_list = rel_U_list
ref_U0 = self.get_ref_U()
tU = np.dot(rotation, (ref_U0[:3] + t_origin - rotation_origin)) \
+ rotation_origin - self._center
tW = np.dot(rotation, (ref_U0[3:] + t_origin - rotation_origin)) \
+ rotation_origin - self._center
self.set_ref_U(np.hstack((tU, tW)))
# dbg, has no effect in the current version.
self._psi = self._psi + np.dot(t_norm, norm) / (
np.linalg.norm(t_norm) * np.linalg.norm(norm)) * theta
return True
def set_ref_U(self, U):
self._ref_U = U
return True
def get_ref_U(self):
return self._ref_U
def get_ref_Ux(self):
return self._ref_U[0]
def get_ref_Uy(self):
return self._ref_U[1]
def get_ref_Uz(self):
return self._ref_U[2]
def get_ref_Omegax(self):
return self._ref_U[3]
def get_ref_Omegay(self):
return self._ref_U[4]
def get_ref_Omegaz(self):
return self._ref_U[5]
# def set_total_force(self, sum_force):
# self._sum_force = sum_force
# return True
def get_total_force(self):
sum_F = np.sum(
[tobj.get_total_force(center=self.get_center()) for tobj in self.get_obj_list()],
axis=0)
return sum_F
def show_velocity(self, length_factor=1, show_nodes=True):
geo_list = uniqueList()
for obj1 in self.get_obj_list():
geo_list.append(obj1.get_u_geo())
temp_geo = base_geo()
temp_geo.combine(geo_list)
temp_geo.show_velocity(length_factor=length_factor, show_nodes=show_nodes)
return True
def show_f_nodes(self, linestyle='-'):
geo_list = uniqueList()
for obj1 in self.get_obj_list():
geo_list.append(obj1.get_f_geo())
temp_geo = base_geo()
temp_geo.combine(geo_list)
temp_geo.show_nodes(linestyle)
return True
def get_f_nodes(self):
geo_list = uniqueList()
for obj1 in self.get_obj_list():
geo_list.append(obj1.get_f_geo())
temp_geo = base_geo()
temp_geo.combine(geo_list)
return temp_geo.get_nodes()
def show_u_nodes(self, linestyle='-'):
geo_list = uniqueList()
for obj1 in self.get_obj_list():
geo_list.append(obj1.get_u_geo())
temp_geo = base_geo()
temp_geo.combine(geo_list)
temp_geo.show_nodes(linestyle)
return True
def get_u_nodes(self):
geo_list = uniqueList()
for obj1 in self.get_obj_list():
geo_list.append(obj1.get_u_geo())
temp_geo = base_geo()
temp_geo.combine(geo_list)
return temp_geo.get_nodes()
def png_u_nodes(self, filename, linestyle='-'):
geo_list = uniqueList()
for obj1 in self.get_obj_list():
geo_list.append(obj1.get_u_geo())
temp_geo = base_geo()
temp_geo.combine(geo_list)
temp_geo.png_nodes(filename, linestyle)
return True
def show_f_u_nodes(self, linestyle='-'):
f_geo_list = uniqueList()
u_geo_list = uniqueList()
for obj1 in self.get_obj_list():
f_geo_list.append(obj1.get_f_geo())
if obj1.get_f_geo() is not obj1.get_u_geo():
u_geo_list.append(obj1.get_u_geo())
f_geo = base_geo()
f_geo.combine(f_geo_list)
u_geo = base_geo()
u_geo.combine(u_geo_list)
temp_geo = geoComposit()
temp_geo.append(u_geo)
temp_geo.append(f_geo)
temp_geo.show_nodes(linestyle)
return True
def save_mat(self):
addInfo = self._problem.get_name() + '_'
for subobj in self.get_obj_list():
subobj.save_mat(addInfo)
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
u_glbIdx, u_glbIdx_all = self.get_u_glbIdx()
f_glbIdx, f_glbIdx_all = self.get_f_glbIdx()
t_nodes = np.full(6, np.nan).reshape((2, 3))
filename = self._problem.get_name() + '_' + self.get_name() + '.mat'
if rank == 0:
savemat(filename,
{'fnodes': t_nodes,
'unodes': t_nodes,
'u_glbIdx': u_glbIdx,
'u_glbIdx_all': u_glbIdx_all,
'f_glbIdx': f_glbIdx,
'f_glbIdx_all': f_glbIdx_all,
'force': np.zeros(6),
# 're_velocity': self._sum_force,
'velocity': self._ref_U, },
oned_as='column')
PETSc.Sys.Print('%s: save information to %s' % (str(self), filename))
return True
def vtk(self, filename, stp_idx=0):
for obj0 in self.get_obj_list():
obj0.vtk(filename, stp_idx)
return True
def vtk_self(self, filename, stp_idx=0, matrix_method=None):
if matrix_method is None:
matrix_method = self.get_obj_list()[0].get_matrix_method()
t0 = time()
obj0 = obj_dic[matrix_method]()
obj0.combine(self.get_obj_list(), set_re_u=True, set_force=True)
obj0.set_name('comp')
obj0.set_matrix_method(matrix_method)
# self.show_velocity()
# obj0.show_velocity()
obj0.vtk(filename, stp_idx)
t1 = time()
PETSc.Sys.Print('%s: write self vtk files use: %fs' % (str(self), (t1 - t0)))
return True
def pickmyself_prepare(self):
self.destroy_dmda()
for sub_obj in self.get_obj_list():
sub_obj.pickmyself_prepare()
return True
def unpick_myself(self):
self.set_dmda()
for sub_obj in self.get_obj_list():
sub_obj.unpick_myself()
return True
def set_update_para(self, fix_x=False, fix_y=False, fix_z=False,
update_fun=Adams_Moulton_Methods,
update_order=1):
# for a cut-off infinite symmetric problem,
# at each time step place the obj at the center of the cut-off region to improve the accuracy.
self._locomotion_fct = np.array((not fix_x, not fix_y, not fix_z), dtype=float)
self._update_fun = update_fun
self._update_order = update_order
return self._locomotion_fct
def update_location(self, eval_dt, print_handle=''):
fct = self._locomotion_fct
ref_U = self.get_ref_U()
self._ref_U_hist.append(ref_U)
norm = self.get_norm()
PETSc.Sys.Print(' -->', str(self), print_handle)
PETSc.Sys.Print(' ref_U', ref_U)
PETSc.Sys.Print(' norm', norm)
tU = np.dot(ref_U[:3], norm) / np.dot(norm, norm)
tW = np.dot(ref_U[3:], norm) / np.dot(norm, norm)
PETSc.Sys.Print(' |ref_U|',
np.hstack((np.linalg.norm(ref_U[:3]), np.linalg.norm(ref_U[3:]))))
PETSc.Sys.Print(' ref_U projection on norm', np.hstack((tU, tW)))
# # dbg
# rel_U_head = self.get_rel_U_list()[0]
# rel_U_tail = self.get_rel_U_list()[1]
# PETSc.Sys.Print(' U_head', rel_U_head + ref_U)
# PETSc.Sys.Print(' U_tail', rel_U_tail + ref_U)
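# Multi-step time integration: take up to `update_order` previous reference
# velocities from the history and combine them with the update function
# (Adams_Moulton_Methods by default) to obtain the displacement and rotation
# over this time step.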
order = np.min((len(self.get_ref_U_hist()), self._update_order))
fct_list = self.get_ref_U_hist()[-1:-(order + 1):-1]
dst_fct_list = [fct[:3] for fct in fct_list]
rot_fct_list = [fct[3:] for fct in fct_list]
distance_true = self._update_fun(order, dst_fct_list, eval_dt)
rotation = self._update_fun(order, rot_fct_list, eval_dt)
# distance_true = ref_U[:3] * eval_dt
# rotation = ref_U[3:] * eval_dt
distance = distance_true * fct
self.move(distance)
self.node_rotation(norm=rotation, theta=np.linalg.norm(rotation))
self._center_hist.append(self._center)
self._norm_hist.append(self._norm)
self._displace_hist.append(distance_true)
self._rotation_hist.append(rotation)
for sub_obj, rel_U in zip(self.get_obj_list(), self.get_rel_U_list()):
distance = rel_U[:3] * eval_dt
rotation = rel_U[3:] * eval_dt
sub_obj.move(distance)
sub_obj.node_rotation(norm=rotation, theta=np.linalg.norm(rotation))
sub_obj.update_location(eval_dt)
return True
def get_locomotion_fct(self):
return self._locomotion_fct
def get_center_hist(self):
return self._center_hist
def get_norm_hist(self):
return self._norm_hist
def get_ref_U_hist(self):
return self._ref_U_hist
def get_displace_hist(self):
return self._displace_hist
def get_rotation_hist(self):
return self._rotation_hist
class GivenTorqueComposite(ForceFreeComposite):
"""
[ M R ] [ F ] = [ Uref + Wref×ri + Urel (-Ubi) ]
[ R 0 ] [ Wref ] [ Tgiven ]
"""
def __init__(self, center: np.array, norm: np.array, givenT=np.zeros(3), givenU=np.zeros(3),
name='...', *args):
super().__init__(center, norm, name, *args)
self._type = 'GivenTorqueComposite' # object type
self._givenT = np.zeros(3) # given Torque.
self.set_givenT(givenT)
self._givenU = np.zeros(3) # given velocity.
self.set_givenU(givenU)
def set_dmda(self):
# additional degrees of freedom for force free.
self._f_dmda = PETSc.DMDA().create(sizes=(3,), dof=1, stencil_width=0,
comm=PETSc.COMM_WORLD)
self._f_dmda.setFromOptions()
self._f_dmda.setUp()
self._u_dmda = PETSc.DMDA().create(sizes=(3,), dof=1, stencil_width=0,
comm=PETSc.COMM_WORLD)
self._u_dmda.setFromOptions()
self._u_dmda.setUp()
return True
def update_location(self, eval_dt, print_handle=''):
fct = self._locomotion_fct
ref_U = self.get_ref_U()
self._ref_U_hist.append(ref_U)
norm = self.get_norm()
PETSc.Sys.Print(' -->', str(self), print_handle)
PETSc.Sys.Print(' ref_U', ref_U)
PETSc.Sys.Print(' norm', norm)
tU = np.dot(ref_U[:3], norm) / np.dot(norm, norm)
tW = np.dot(ref_U[3:], norm) / np.dot(norm, norm)
PETSc.Sys.Print(' |ref_U|',
np.hstack((np.linalg.norm(ref_U[:3]), np.linalg.norm(ref_U[3:]))))
PETSc.Sys.Print(' ref_U projection on norm', np.hstack((tU, tW)))
order = np.min((len(self.get_ref_U_hist()), self._update_order))
fct_list = self.get_ref_U_hist()[-1:-(order + 1):-1]
dst_fct_list = [fct[:3] for fct in fct_list]
rot_fct_list = [fct[3:] for fct in fct_list]
distance_true = self._update_fun(order, dst_fct_list, eval_dt)
rotation = self._update_fun(order, rot_fct_list, eval_dt)
# distance_true = ref_U[:3] * eval_dt
# rotation = ref_U[3:] * eval_dt
distance = distance_true * fct
self.move(distance)
self.node_rotation(norm=rotation, theta=np.linalg.norm(rotation))
self._center_hist.append(self._center)
self._norm_hist.append(self._norm)
self._displace_hist.append(distance_true)
self._rotation_hist.append(rotation)
for sub_obj, rel_U in zip(self.get_obj_list(), self.get_rel_U_list()):
distance = rel_U[:3] * eval_dt
rotation = rel_U[3:] * eval_dt
sub_obj.move(distance)
sub_obj.node_rotation(norm=rotation, theta=np.linalg.norm(rotation))
sub_obj.update_location(eval_dt)
return True
def set_ref_U(self, W_ref):
# in this case, U->W_ref is the reference spin.
W_ref = np.array(W_ref).flatten()
err_msg = 'in %s composite, W_ref=[wx, wy, wz] has 3 components. ' % repr(self)
assert W_ref.size == 3, err_msg
# U_ref=[ux, uy, uz] is a rigid body motion. U_ref = givenU * norm + u_b, where u_b is the background flow.
# therefore, u_b is also a rigid body motion; here we use the background velocity at the composite center.
givenU = self.get_givenU()
problem = self._problem
if isinstance(problem, _GivenFlowProblem):
# # dbg
# PETSc.Sys.Print(givenU)
# PETSc.Sys.Print(problem.get_given_flow_at(self.get_center()))
givenU = givenU + problem.get_given_flow_at(self.get_center())
self._ref_U = np.hstack((givenU, W_ref))
return True
def get_givenT(self):
return self._givenT
def set_givenT(self, givenT):
givenT = np.array(givenT).flatten()
err_msg = 'givenT=[tx, ty, tz] has 3 components. '
assert givenT.size == 3, err_msg
self._givenT = givenT
return True
def get_givenU(self):
return self._givenU
def set_givenU(self, givenU):
givenU = np.array(givenU).flatten()
err_msg = 'givenU=[ux, uy, uz] has 3 components. '
assert givenU.size == 3, err_msg
self._givenU = givenU
return True
class GivenVelocityComposite(ForceFreeComposite):
"""
currently, only work for two parts currently.
[ Mhh Mht Rh 0 ] [ Fh ] = [ Uref + Urel (-Ubi) ]
[ Mth Mtt 0 Rt ] [ Ft ] [ Uref + Urel (-Ubi) ]
[ I I 0 0 ] [ Wrel_head ] [ 0 ]
[ Rh Rt 0 0 ] [ wrel_tail ] [ 0 ]
Wref_head == Wref_tail == 0
"""
def __init__(self, center: np.array, norm: np.array, givenU=np.zeros(3), name='...', *args):
super().__init__(center, norm, name, *args)
self._type = 'GivenVelocityComposite' # object type
self._givenU = np.zeros(3) # given velocity.
self.set_givenU(givenU)
def add_obj(self, obj, rel_U):
# rel_w(x,y,z) are solved later.
err_msg = 'rel_U=[rel_ux, rel_uy, rel_uz, 0, 0, 0] for the GivenVelocityComposite'
assert np.all(rel_U[3:] == np.zeros(3)), err_msg
super().add_obj(obj, rel_U)
return True
def update_location(self, eval_dt, print_handle=''):
fct = self._locomotion_fct
ref_U = self.get_ref_U()
self._ref_U_hist.append(ref_U)
norm = self.get_norm()
# currently, only works for a two-part composite.
err_msg = 'current version: len(self.get_obj_list()) == 2'
assert len(self.get_obj_list()) == 2, err_msg
U_rel_head = self.get_rel_U_list()[0]
U_rel_tail = self.get_rel_U_list()[1]
PETSc.Sys.Print(' -->', str(self), print_handle)
PETSc.Sys.Print(' ref_U', ref_U)
PETSc.Sys.Print(' norm', norm)
tU = np.dot(ref_U[:3], norm) / np.dot(norm, norm)
tW = np.dot(ref_U[3:], norm) / np.dot(norm, norm)
PETSc.Sys.Print(' |ref_U|',
np.hstack((np.linalg.norm(ref_U[:3]), np.linalg.norm(ref_U[3:]))))
PETSc.Sys.Print(' ref_U projection on norm', np.hstack((tU, tW)))
PETSc.Sys.Print(' U_rel_head', U_rel_head)
PETSc.Sys.Print(' U_rel_tail', U_rel_tail)
PETSc.Sys.Print(' Wrel_motor', (- U_rel_head + U_rel_tail)[3:])
PETSc.Sys.Print(' U_head', U_rel_head + ref_U)
PETSc.Sys.Print(' U_tail', U_rel_tail + ref_U)
order = np.min((len(self.get_ref_U_hist()), self._update_order))
fct_list = self.get_ref_U_hist()[-1:-(order + 1):-1]
dst_fct_list = [fct[:3] for fct in fct_list]
rot_fct_list = [fct[3:] for fct in fct_list]
distance_true = self._update_fun(order, dst_fct_list, eval_dt)
rotation = self._update_fun(order, rot_fct_list, eval_dt)
# distance_true = ref_U[:3] * eval_dt
# rotation = ref_U[3:] * eval_dt
distance = distance_true * fct
self.move(distance)
self.node_rotation(norm=rotation, theta=np.linalg.norm(rotation))
self._center_hist.append(self._center)
self._norm_hist.append(self._norm)
self._displace_hist.append(distance_true)
self._rotation_hist.append(rotation)
for sub_obj, rel_U in zip(self.get_obj_list(), self.get_rel_U_list()):
distance = rel_U[:3] * eval_dt
rotation = rel_U[3:] * eval_dt
sub_obj.move(distance)
sub_obj.node_rotation(norm=rotation, theta=np.linalg.norm(rotation))
sub_obj.update_location(eval_dt)
return True
def set_ref_U(self, W_ref):
# in this case, U->W_ref is the reference spin of the head and the tail.
# currently, the composite can only handle a two-part problem.
W_ref = np.array(W_ref).flatten()
# W_ref = [W_head_x, W_head_y, W_head_z, W_tail_x, W_tail_y, W_tail_z] holds the two rigid body spins.
givenU = self.get_givenU()
problem = self._problem
if isinstance(problem, _GivenFlowProblem):
givenU = givenU + problem.get_given_flow_at(self.get_center())
self._ref_U = np.hstack((givenU, (0, 0, 0)))
# reset the rel_U list
# currently, only works for a two-part composite.
err_msg = 'current version: len(self.get_obj_list()) == 2'
assert len(self.get_obj_list()) == 2, err_msg
rel_U_list = []
# head
tobj = self.get_obj_list()[0]
rel_U = self.get_rel_U_list()[0]
t_rel_U = np.hstack((rel_U[:3], W_ref[:3]))
tobj.set_rigid_velocity(t_rel_U, self.get_center())
rel_U_list.append(t_rel_U)
# tail
tobj = self.get_obj_list()[1]
rel_U = self.get_rel_U_list()[1]
t_rel_U = np.hstack((rel_U[:3], W_ref[3:]))
tobj.set_rigid_velocity(t_rel_U, self.get_center())
rel_U_list.append(t_rel_U)
self._rel_U_list = rel_U_list
return True
def get_givenU(self):
return self._givenU
def set_givenU(self, givenU):
givenU = np.array(givenU).flatten()
err_msg = 'givenU=[ux, uy, uz] has 3 components. '
assert givenU.size == 3, err_msg
self._givenU = givenU
return True
class ForceFree1DInfComposite(ForceFreeComposite):
def set_dmda(self):
# additional degrees of freedom for force free.
self._f_dmda = PETSc.DMDA().create(sizes=(2,), dof=1, stencil_width=0,
comm=PETSc.COMM_WORLD)
self._f_dmda.setFromOptions()
self._f_dmda.setUp()
self._u_dmda = PETSc.DMDA().create(sizes=(2,), dof=1, stencil_width=0,
comm=PETSc.COMM_WORLD)
self._u_dmda.setFromOptions()
self._u_dmda.setUp()
return True
class GivenForceComposite(ForceFreeComposite):
def __init__(self, center: np.array, norm: np.array, name='...', givenF=np.zeros(6), *args):
self._givenF = np.zeros(6) # given external force and torque.
super().__init__(center=center, norm=norm, name=name, *args)
self.set_givenF(givenF)
def get_givenF(self):
return self._givenF
def set_givenF(self, givenF):
err_msg = 'givenF=[fx, fy, fz, tx, ty, tz] has 6 components. '
assert givenF.size == 6, err_msg
self._givenF = givenF
return True
# def node_rotation(self, norm=np.array([0, 0, 1]), theta=np.zeros(1), rotation_origin=None):
# rotation_origin = self._center if rotation_origin is None else rotation_origin
# for subobj, rel_U in zip(self.get_obj_list(), self.get_rel_U_list()):
# subobj.node_rotation(norm=norm, theta=theta, rotation_origin=rotation_origin)
# subobj.set_rigid_velocity(rel_U, self.get_center())
#
# rotation = get_rot_matrix(norm, theta)
# t_origin = self._center
# t_norm = self._norm.copy()
# self._center = np.dot(rotation, (self._center - rotation_origin)) + rotation_origin
# self._norm = np.dot(rotation, (self._norm + t_origin - rotation_origin)) \
# + rotation_origin - self._center
# self._norm = self._norm / np.linalg.norm(self._norm)
#
# rel_U_list = []
# for rel_U0 in self.get_rel_U_list():
# tU = np.dot(rotation, (rel_U0[:3] + t_origin - rotation_origin)) \
# + rotation_origin - self._center
# tW = np.dot(rotation, (rel_U0[3:] + t_origin - rotation_origin)) \
# + rotation_origin - self._center
# rel_U_list.append(np.hstack((tU, tW)))
# self._rel_U_list = rel_U_list
#
# ref_U0 = self.get_ref_U()
# tU = np.dot(rotation, (ref_U0[:3] + t_origin - rotation_origin)) \
# + rotation_origin - self._center
# tW = np.dot(rotation, (ref_U0[3:] + t_origin - rotation_origin)) \
# + rotation_origin - self._center
# self.set_ref_U(np.hstack((tU, tW)))
#
# # dbg, current version have no effect.
# self._psi = self._psi + np.dot(t_norm, norm) / (
# np.linalg.norm(t_norm) * np.linalg.norm(norm)) * theta
# return True
def core_show_givenF(self, arrowFactor=1):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
center = self.get_center()
givenF = self.get_givenF()
geo_list = uniqueList()
for obj1 in self.get_obj_list():
geo_list.append(obj1.get_u_geo())
temp_geo = base_geo()
temp_geo.combine(geo_list)
fig = temp_geo.core_show_nodes()
if rank == 0:
temp1 = arrowFactor * givenF[:3] / np.sqrt(
np.sum(givenF[:3] ** 2)) # normalized, for show.
temp2 = arrowFactor * givenF[3:] / np.sqrt(
np.sum(givenF[3:] ** 2)) # normalized, for show.
ax = fig.gca()
ax.quiver(center[0], center[1], center[2], temp1[0], temp1[1], temp1[2], color='r')
ax.quiver(center[0], center[1], center[2], temp2[0], temp2[1], temp2[2], color='k')
return fig
def show_givenF(self, arrowFactor=1):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
self.core_show_givenF(arrowFactor=arrowFactor)
if rank == 0:
plt.grid()
plt.get_current_fig_manager().window.showMaximized()
plt.show()
return True
def png_givenF(self, finename, arrowFactor=1):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
finename = check_file_extension(finename, '.png')
fig = self.core_show_givenF(arrowFactor=arrowFactor)
if rank == 0:
fig.set_size_inches(18.5, 10.5)
fig.savefig(finename, dpi=100)
plt.close()
return True
class GivenForce1DInfComposite(GivenForceComposite, ForceFree1DInfComposite):
def _nothing(self):
pass
class ForceFreeProblem(StokesFlowProblem):
# _ffweigth # type: np.ndarray
def _init_kwargs(self, **kwargs):
super()._init_kwargs(**kwargs)
ffweightx = kwargs['ffweightx'] / kwargs['zoom_factor']
ffweighty = kwargs['ffweighty'] / kwargs['zoom_factor']
ffweightz = kwargs['ffweightz'] / kwargs['zoom_factor']
ffweightT = kwargs['ffweightT'] / kwargs['zoom_factor']
self._ffweigth = ... # type: np.ndarray
self.set_ffweight(ffweightx, ffweighty, ffweightz, ffweightT)
return True
def get_ffweight(self):
return self._ffweigth
def set_ffweight(self, ffweightx, ffweighty, ffweightz, ffweightT):
self._ffweigth = np.array([ffweightx, ffweighty, ffweightz,
ffweightT ** 2, ffweightT ** 2, ffweightT ** 2])
assert self._ffweigth[3] == self._ffweigth[4] == self._ffweigth[5], \
' # IMPORTANT!!! _ffweigth[3]==_ffweigth[4]==_ffweigth[5]'
PETSc.Sys.Print('-->absolute force free weight %s ' % self._ffweigth)
return True
def __init__(self, **kwargs):
# self._ffweigth = ...
super().__init__(**kwargs)
self._all_obj_list = uniqueList()  # contains all objects, including sub-objects within a ForceFreeComposite.
self._compst_list = uniqueList()  # list of ForceFreeComposite objects.
def add_obj(self, obj):
if isinstance(obj, ForceFreeComposite):
self._obj_list.append(obj)
obj.set_index(self.get_n_obj())
obj.set_problem(self)
# obj.set_matrix_method(self.get_matrix_method())
for sub_obj in obj.get_obj_list():
self._check_add_obj(sub_obj)
self._all_obj_list.append(sub_obj)
self._f_pkg.addDM(sub_obj.get_f_geo().get_dmda())
self._u_pkg.addDM(sub_obj.get_u_geo().get_dmda())
self._n_fnode += sub_obj.get_n_f_node()
self._n_unode += sub_obj.get_n_u_node()
sub_obj.set_matrix_method(self.get_matrix_method())
self._f_pkg.addDM(obj.get_f_dmda())
self._u_pkg.addDM(obj.get_u_dmda())
self._compst_list.append(obj)
else:
self._all_obj_list.append(obj)
super().add_obj(obj)
return True
def get_all_obj_list(self, ):
return self._all_obj_list
def _create_U(self):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
velocity = self._u_pkg.createGlobalVector()
velocity.zeroEntries()
for obj0 in self.get_obj_list():
if isinstance(obj0, ForceFreeComposite):
center = obj0.get_center()
for sub_obj, rel_U in zip(obj0.get_obj_list(), obj0.get_rel_U_list()):
sub_nodes = sub_obj.get_u_geo().get_nodes()
# sub_obj.show_velocity(length_factor=0.1, show_nodes=True)
r = sub_nodes - center
t_u = (rel_U[:3] + np.cross(rel_U[3:], r)).flatten()
_, u_glbIdx_all = sub_obj.get_u_geo().get_glbIdx()
if rank == 0:
velocity[u_glbIdx_all] = t_u[:]
else:
u0 = obj0.get_velocity()
_, u_glbIdx_all = obj0.get_u_geo().get_glbIdx()
if rank == 0:
velocity[u_glbIdx_all] = u0[:]
velocity.assemble()
self._velocity_petsc = velocity
return True
def _set_glbIdx(self):
# global index
f_isglb = self._f_pkg.getGlobalISs()
u_isglb = self._u_pkg.getGlobalISs()
for obj0 in self.get_obj_list():
if isinstance(obj0, ForceFreeComposite):
for sub_obj in obj0.get_obj_list():
t_f_isglb = f_isglb.pop(0)
t_u_isglb = u_isglb.pop(0)
sub_obj.get_f_geo().set_glbIdx(t_f_isglb.getIndices())
sub_obj.get_u_geo().set_glbIdx(t_u_isglb.getIndices())
t_f_isglb = f_isglb.pop(0)  # additional degrees of freedom (force side) for the force-free constraint
t_u_isglb = u_isglb.pop(0)  # additional degrees of freedom (velocity side) for the force-free constraint
obj0.set_f_glbIdx(t_f_isglb.getIndices())
obj0.set_u_glbIdx(t_u_isglb.getIndices())
else:
t_f_isglb = f_isglb.pop(0)
t_u_isglb = u_isglb.pop(0)
obj0.get_f_geo().set_glbIdx(t_f_isglb.getIndices())
obj0.get_u_geo().set_glbIdx(t_u_isglb.getIndices())
return True
def set_force_free(self):
import numpy.matlib as npm
ffweight = self._ffweigth
err_msg = 'self._M_petsc is NOT assembled'
assert self._M_petsc.isAssembled(), err_msg
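# For every composite: tmu appends columns that add the (unknown, weighted)
# reference rigid-body velocity term -U_ref - W_ref x r to each node's velocity
# equation, while tmf appends rows that sum the node forces and their torques
# about the center, i.e. the force-free / torque-free constraints (up to the
# force-free weights and signs).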
for obj1 in self.get_obj_list():
if isinstance(obj1, ForceFreeComposite):
center = obj1.get_center()
_, u_glbIdx_all = obj1.get_u_glbIdx()
_, f_glbIdx_all = obj1.get_f_glbIdx()
# self._M_petsc.zeroRows(u_glbIdx_all)
# self._M_petsc.setValues(u_glbIdx_all, range(f_size), np.zeros(f_size), addv=False)
# self._M_petsc.setValues(range(u_size), f_glbIdx_all, np.zeros(u_size), addv=False)
for sub_obj in obj1.get_obj_list():
r_u = sub_obj.get_u_geo().get_nodes() - center
r_f = sub_obj.get_f_geo().get_nodes() - center
t_I = np.array(((-ffweight[0], 0, 0),
(0, -ffweight[1], 0),
(0, 0, -ffweight[2])))
tmu1 = npm.repmat(t_I, sub_obj.get_n_u_node(), 1)
tmu2 = np.vstack([((0, -ri[2], ri[1]),
(ri[2], 0, -ri[0]),
(-ri[1], ri[0], 0))
for ri in r_u]) * ffweight[3]
tmf1 = npm.repmat(t_I, 1, sub_obj.get_n_f_node())
tmf2 = np.hstack([((0, -ri[2], ri[1]),
(ri[2], 0, -ri[0]),
(-ri[1], ri[0], 0))
for ri in r_f]) * ffweight[3]
tmu = np.hstack((tmu1, tmu2))
tmf = np.vstack((tmf1, tmf2))
_, sub_u_glbIdx_all = sub_obj.get_u_geo().get_glbIdx()
_, sub_f_glbIdx_all = sub_obj.get_f_geo().get_glbIdx()
self._M_petsc.setValues(sub_u_glbIdx_all, f_glbIdx_all, tmu, addv=False)
self._M_petsc.setValues(u_glbIdx_all, sub_f_glbIdx_all, tmf, addv=False)
# # dbg
# PETSc.Sys.Print(sub_u_glbIdx_all, f_glbIdx_all)
self._M_petsc.assemble()
return True
def create_matrix(self):
t0 = time()
self.create_F_U()
# create matrix
# 1. setup matrix
if not self._M_petsc.isAssembled():
self.create_empty_M()
self._M_destroyed = False
# 2. set mij part of matrix
# cmbd_ugeo = geo( )
# cmbd_ugeo.combine([obj.get_u_geo( ) for obj in self.get_all_obj_list( )])
# cmbd_ugeo.set_glbIdx_all(np.hstack([obj.get_u_geo( ).get_glbIdx( )[1] for obj in self.get_all_obj_list( )]))
# cmbd_obj = StokesFlowObj( )
# cmbd_obj.set_data(cmbd_ugeo, cmbd_ugeo)
# self._create_matrix_obj(cmbd_obj, self._M_petsc)
n_obj = len(self.get_all_obj_list())
for i0, obj1 in enumerate(self.get_all_obj_list()):
INDEX = ' %d/%d' % (i0 + 1, n_obj)
self._create_matrix_obj(obj1, self._M_petsc, INDEX)
# 3. set force and torque free part of matrix
self.set_force_free()
# self._M_petsc.view()
t1 = time()
PETSc.Sys.Print(' %s: create matrix use: %fs' % (str(self), (t1 - t0)))
return True
def _solve_force(self, ksp):
kwargs = self._kwargs
getConvergenceHistory = kwargs['getConvergenceHistory']
ffweight = self._ffweigth
if getConvergenceHistory:
ksp.setConvergenceHistory()
ksp.solve(self._velocity_petsc, self._force_petsc)
self._convergenceHistory = ksp.getConvergenceHistory()
else:
ksp.solve(self._velocity_petsc, self._force_petsc)
t_force = self.vec_scatter(self._force_petsc, destroy=False)
tmp = []
for obj0 in self.get_obj_list():
if isinstance(obj0, ForceFreeComposite):
center = obj0.get_center()
for sub_obj in obj0.get_obj_list():
_, f_glbIdx_all = sub_obj.get_f_geo().get_glbIdx()
sub_obj.set_force(t_force[f_glbIdx_all])
tmp.append(t_force[f_glbIdx_all])
_, f_glbIdx_all = obj0.get_f_glbIdx()
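# The additional unknowns stored at the composite's own global indices are the
# weighted reference rigid-body velocity; rescale by ffweight to recover ref_U.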
ref_U = t_force[f_glbIdx_all] * ffweight
obj0.set_ref_U(ref_U)
ref_U = obj0.get_ref_U()
# absolute speed
for sub_obj, rel_U in zip(obj0.get_obj_list(), obj0.get_rel_U_list()):
abs_U = ref_U + rel_U
sub_obj.get_u_geo().set_rigid_velocity(abs_U, center=center)
else:
_, f_glbIdx_all = obj0.get_f_geo().get_glbIdx()
obj0.set_force(t_force[f_glbIdx_all])
tmp.append(t_force[f_glbIdx_all])
self._force = np.hstack(tmp)
return True
def _resolve_velocity(self, ksp):
# self._re_velocity = u_rel + w_rel×ri
# self._re_velocity + u_ref + w_ref×ri = u_ref + w_ref×ri + u_rel + w_rel×ri
ffweight = self._ffweigth
re_velocity_petsc = self._M_petsc.createVecLeft()
# re_velocity_petsc.set(0)
self._M_petsc.mult(self._force_petsc, re_velocity_petsc)
self._re_velocity = self.vec_scatter(re_velocity_petsc)
for obj0 in self.get_obj_list():
if isinstance(obj0, ForceFreeComposite):
ref_U = obj0.get_ref_U()
center = obj0.get_center()
for sub_obj in obj0.get_obj_list():
_, u_glbIdx_all = sub_obj.get_u_geo().get_glbIdx()
re_rel_U = self._re_velocity[u_glbIdx_all]
sub_nodes = sub_obj.get_u_geo().get_nodes()
r = sub_nodes - center
t_u = (ref_U[:3] + np.cross(ref_U[3:], r)).flatten()
re_abs_U = t_u + re_rel_U
sub_obj.set_re_velocity(re_abs_U)
# # dbg
# t_list = []
# for sub_obj in obj0.get_obj_list():
# _, u_glbIdx_all = sub_obj.get_u_geo().get_glbIdx()
# re_rel_U = self._re_velocity[u_glbIdx_all]
# sub_nodes = sub_obj.get_u_geo().get_nodes()
# r = sub_nodes - center
# t_u = (ref_U[:3] + np.cross(ref_U[3:], r)).flatten()
# re_abs_U = t_u + re_rel_U
# t_geo = sub_obj.get_u_geo().copy()
# t_geo.set_velocity(re_abs_U)
# t_list.append(t_geo)
# t_geo2 = geo()
# t_geo2.combine(t_list)
# t_geo2.show_velocity()
# _, u_glbIdx_all = obj0.get_u_glbIdx()
# re_sum = self._re_velocity[u_glbIdx_all] * ([-1] * 3 + [1] * 3) / ffweight
# obj0.set_total_force(re_sum) # force free, analytically they are zero.
else:
_, u_glbIdx_all = obj0.get_u_geo().get_glbIdx()
obj0.set_re_velocity(self._re_velocity[u_glbIdx_all])
self._finish_solve = True
return ksp.getResidualNorm()
def show_velocity(self, length_factor=1, show_nodes=True):
geo_list = uniqueList()
for obj1 in self.get_obj_list():
if isinstance(obj1, ForceFreeComposite):
for obj2 in obj1.get_obj_list():
geo_list.append(obj2.get_u_geo())
else:
geo_list.append(obj1.get_u_geo())
temp_geo = base_geo()
temp_geo.combine(geo_list)
temp_geo.show_velocity(length_factor=length_factor, show_nodes=show_nodes)
return True
def show_force(self, length_factor=1, show_nodes=True):
geo_list = uniqueList()
t_force = []
for obj1 in self.get_obj_list():
if isinstance(obj1, ForceFreeComposite):
for obj2 in obj1.get_obj_list():
geo_list.append(obj2.get_u_geo())
t_force.append(obj2.get_force())
else:
geo_list.append(obj1.get_u_geo())
t_force.append(obj1.get_force())
temp_geo = base_geo()
temp_geo.combine(geo_list)
temp_geo.set_velocity(np.hstack(t_force))
temp_geo.show_velocity(length_factor=length_factor, show_nodes=show_nodes)
return True
def show_re_velocity(self, length_factor=1, show_nodes=True):
geo_list = uniqueList()
for obj1 in self.get_obj_list():
if isinstance(obj1, ForceFreeComposite):
for obj2 in obj1.get_obj_list():
t_geo = obj2.get_u_geo().copy()
t_geo.set_velocity(obj2.get_re_velocity())
geo_list.append(t_geo)
else:
t_geo = obj1.get_u_geo().copy()
t_geo.set_velocity(obj1.get_re_velocity())
geo_list.append(t_geo)
temp_geo = base_geo()
temp_geo.combine(geo_list)
temp_geo.show_velocity(length_factor=length_factor, show_nodes=show_nodes)
return True
# def vtk_self(self, filename, stp_idx=0):
# self.check_finish_solve()
# obj_list = uniqueList()
# for obj0 in self.get_obj_list():
# if isinstance(obj0, forcefreeComposite):
# for obj1 in obj0.get_obj_list():
# obj_list.append(obj1)
# else:
# obj_list.append(obj0)
# obj0 = StokesFlowObj()
# obj0.combine(obj_list, set_re_u=True, set_force=True)
# obj0.set_name('Prb')
# obj0.vtk(filename, stp_idx)
# return True
def vtk_check(self, filename: str, obj: "StokesFlowObj", ref_slt=None):
obj_tube = list(tube_flatten((obj,)))
err = []
for obj in obj_tube:
if isinstance(obj, StokesFlowObj):
err.append(self._vtk_check(filename + '_' + str(obj) + '_check', obj, ref_slt))
elif isinstance(obj, ForceFreeComposite):
err_msg = 'ref_slt must be None if input is a ForceFreeComposite. '
assert ref_slt is None, err_msg
for t_err in self._vtk_composite_check(filename, obj):
err.append(t_err)
else:
err_msg = 'unknown obj type. '
raise TypeError(err_msg)
return tube_flatten((err,))
def _vtk_composite_check(self, filename: str, obj: "ForceFreeComposite"):
error = []
ref_U = obj.get_ref_U()
center = obj.get_center()
for subobj, rel_U in zip(obj.get_obj_list(), obj.get_rel_U_list()):
U = ref_U + rel_U
subobj.get_u_geo().set_rigid_velocity(U, center=center)
error.append(
self._vtk_check(filename + '_' + str(subobj) + '_check', subobj,
INDEX=str(subobj)))
return tube_flatten((error,))
def _save_M_mat_dict(self, M_dict, obj):
if isinstance(obj, ForceFreeComposite):
for subobj in obj.get_obj_list():
super()._save_M_mat_dict(M_dict, subobj)
t_name_all = str(obj) + '_Idx_all'
t_name = str(obj) + '_Idx'
u_glbIdx, u_glbIdx_all = obj.get_u_glbIdx()
M_dict[t_name_all] = u_glbIdx_all
M_dict[t_name] = u_glbIdx
else:
super()._save_M_mat_dict(M_dict, obj)
return True
def _unpick_addDM(self, obj1):
if isinstance(obj1, ForceFreeComposite):
for sub_obj in obj1.get_obj_list():
super()._unpick_addDM(sub_obj)
self._f_pkg.addDM(obj1.get_f_dmda())
self._u_pkg.addDM(obj1.get_u_dmda())
else:
super()._unpick_addDM(obj1)
return True
def unpick_myself(self, check_MPISIZE=True):
super(ForceFreeProblem, self).unpick_myself(check_MPISIZE=check_MPISIZE)
# this property was added in a later update.
if not hasattr(self, '_ffweigth'):
ffweight = self.get_kwargs()['ffweight']
self._ffweigth = np.array([ffweight, ffweight, ffweight,
ffweight ** 2, ffweight ** 2, ffweight ** 2])
PETSc.Sys.Print('-->absolute force free weight %s ' % self._ffweigth)
return True
def update_location(self, eval_dt, print_handle=''):
super().update_location(eval_dt, print_handle)
self.set_force_free()
return True
class ForceFreeIterateProblem(ForceFreeProblem):
def set_ffweight(self, ffweightx, ffweighty, ffweightz, ffweightT):
pass
return True
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._iterComp = ... # type: ForceFreeComposite
def add_obj(self, obj):
if isinstance(obj, ForceFreeComposite):
self._obj_list.append(obj)
obj.set_index(self.get_n_obj())
obj.set_problem(self)
for sub_obj in obj.get_obj_list():
self._check_add_obj(sub_obj)
self._all_obj_list.append(sub_obj)
self._f_pkg.addDM(sub_obj.get_f_geo().get_dmda())
self._u_pkg.addDM(sub_obj.get_u_geo().get_dmda())
self._n_fnode += sub_obj.get_n_f_node()
self._n_unode += sub_obj.get_n_u_node()
sub_obj.set_matrix_method(self.get_matrix_method())
else:
self._all_obj_list.append(obj)
super().add_obj(obj)
return True
def _set_glbIdx(self):
return StokesFlowProblem._set_glbIdx(self)
def _solve_force(self, ksp):
kwargs = self._kwargs
getConvergenceHistory = kwargs['getConvergenceHistory']
if getConvergenceHistory:
ksp.setConvergenceHistory()
ksp.solve(self._velocity_petsc, self._force_petsc)
self._convergenceHistory = ksp.getConvergenceHistory()
else:
ksp.solve(self._velocity_petsc, self._force_petsc)
t_force = self.vec_scatter(self._force_petsc, destroy=False)
tmp = []
for obj0 in self.get_obj_list():
if isinstance(obj0, ForceFreeComposite):
center = obj0.get_center()
for sub_obj in obj0.get_obj_list():
_, f_glbIdx_all = sub_obj.get_f_geo().get_glbIdx()
sub_obj.set_force(t_force[f_glbIdx_all])
tmp.append(t_force[f_glbIdx_all])
ref_U = obj0.get_ref_U()
# absolute speed
for sub_obj, rel_U in zip(obj0.get_obj_list(), obj0.get_rel_U_list()):
abs_U = ref_U + rel_U
sub_obj.get_u_geo().set_rigid_velocity(abs_U, center=center)
else:
_, f_glbIdx_all = obj0.get_f_geo().get_glbIdx()
obj0.set_force(t_force[f_glbIdx_all])
tmp.append(t_force[f_glbIdx_all])
self._force = np.hstack(tmp)
return True
def _resolve_velocity(self, ksp):
# self._re_velocity = u_rel + w_rel×ri
# self._re_velocity + u_ref + w_ref×ri = u_ref + w_ref×ri + u_rel + w_rel×ri
ffweight = self._ffweigth
re_velocity_petsc = self._M_petsc.createVecLeft()
# re_velocity_petsc.set(0)
self._M_petsc.mult(self._force_petsc, re_velocity_petsc)
self._re_velocity = self.vec_scatter(re_velocity_petsc)
for obj0 in self.get_obj_list():
if isinstance(obj0, ForceFreeComposite):
ref_U = obj0.get_ref_U()
center = obj0.get_center()
for sub_obj in obj0.get_obj_list():
_, u_glbIdx_all = sub_obj.get_u_geo().get_glbIdx()
re_rel_U = self._re_velocity[u_glbIdx_all]
sub_nodes = sub_obj.get_u_geo().get_nodes()
r = sub_nodes - center
t_u = (ref_U[:3] + np.cross(ref_U[3:], r)).flatten()
re_abs_U = t_u + re_rel_U
sub_obj.set_re_velocity(re_abs_U)
else:
_, u_glbIdx_all = obj0.get_u_geo().get_glbIdx()
obj0.set_re_velocity(self._re_velocity[u_glbIdx_all])
self._finish_solve = True
return ksp.getResidualNorm()
def set_iterate_comp(self, iterComp):
# set the object that varies its velocity to reach the force free condition.
# other objects in the problem have given velocities.
self._iterComp = iterComp
return True
def _create_U(self):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
velocity = self._u_pkg.createGlobalVector()
velocity.zeroEntries()
for obj0 in self.get_obj_list():
if isinstance(obj0, ForceFreeComposite):
center = obj0.get_center()
for sub_obj, rel_U in zip(obj0.get_obj_list(), obj0.get_rel_U_list()):
sub_nodes = sub_obj.get_u_geo().get_nodes()
# sub_obj.show_velocity(length_factor=0.1, show_nodes=True)
r = sub_nodes - center
abs_U = rel_U + obj0.get_ref_U()
t_u = (abs_U[:3] + np.cross(abs_U[3:], r)).flatten()
_, u_glbIdx_all = sub_obj.get_u_geo().get_glbIdx()
if rank == 0:
velocity[u_glbIdx_all] = t_u[:]
else:
u0 = obj0.get_velocity()
_, u_glbIdx_all = obj0.get_u_geo().get_glbIdx()
if rank == 0:
velocity[u_glbIdx_all] = u0[:]
velocity.assemble()
self._velocity_petsc = velocity
return True
def create_matrix(self):
return StokesFlowProblem.create_matrix(self)
def solve_sumForce(self, refU):
self._iterComp.set_ref_U(refU)
self.create_F_U()
self.solve()
center = self._iterComp.get_center()
sum_force = np.sum(
[tobj.get_total_force(center=center) for tobj in self._iterComp.get_obj_list()],
axis=0)
tF = sum_force[:3]
tT = sum_force[3:]
return tF, tT
def each_iterate(self, u0, w0, u1, w1):
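        # secant type update: first interpolate the total torque to zero in w
        # (at fixed u0), then interpolate the total force to zero in u (at the
        # freshly computed w2).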
F00, T00 = self.solve_sumForce(np.hstack((u0, w0)))
F01, T01 = self.solve_sumForce(np.hstack((u0, w1)))
w2 = (w0 - w1) / (T00 - T01) * (0 - T00) + w0
PETSc.Sys.Print(' w0=%s, w1=%s, T00=%s, T01=%s, w2=%s' % (w0, w1, T00, T01, w2))
F02, T02 = self.solve_sumForce(np.hstack((u0, w2)))
F12, T12 = self.solve_sumForce(np.hstack((u1, w2)))
u2 = (u0 - u1) / (F02 - F12) * (0 - F02) + u0
PETSc.Sys.Print(' u0=%s, u1=%s, F02=%s, F12=%s, u2=%s' % (u0, u1, F02, F12, u2))
return u2, w2
def do_iterate(self, ini_refU0=np.zeros(6), ini_refU1=np.ones(6), max_it=1000, tolerate=1e-3):
u0 = ini_refU0[:3]
u1 = ini_refU1[:3]
w0 = ini_refU0[3:]
w1 = ini_refU1[3:]
PETSc.Sys.Print('-->>iterate: %d' % 0)
F_reference, _ = self.solve_sumForce(refU=np.hstack((u1, w0)))
_, T_reference = self.solve_sumForce(refU=np.hstack((u0, w1)))
tol = tolerate * 100
n_it = 0 # # of iterate
while np.any(tol > tolerate) and n_it < max_it:
PETSc.Sys.Print('-->>iterate: %d' % (n_it + 1))
u2, w2 = self.each_iterate(u0, w0, u1, w1)
F22, T22 = self.solve_sumForce(np.hstack((u2, w2)))
Ftol = np.abs(F22 / F_reference)
Ttol = np.abs(T22 / T_reference)
tol = np.hstack((Ftol, Ttol))
PETSc.Sys.Print(
' u2=%s, w2=%s, F22=%s, T22=%s, Ftol=%s, Ttol=%s' % (
u2, w2, F22, T22, Ftol, Ttol))
u0, u1 = u1, u2
w0, w1 = w1, w2
n_it = n_it + 1
return np.hstack((u2, w2)), Ftol, Ttol
def each_iterate2(self, u0, w0, u1, w1, F11, T11, relax_fct=1):
F10, T10 = self.solve_sumForce(np.hstack((u1, w0)))
F01, T01 = self.solve_sumForce(np.hstack((u0, w1)))
w2 = (w0 - w1) / (T10 - T11) * (0 - T10) * relax_fct + w0
u2 = (u0 - u1) / (F01 - F11) * (0 - F01) * relax_fct + u0
PETSc.Sys.Print(' w0=%s, w1=%s, T10=%s, T11=%s, w2=%s' % (w0, w1, T10, T11, w2))
PETSc.Sys.Print(' u0=%s, u1=%s, F01=%s, F11=%s, u2=%s' % (u0, u1, F01, F11, u2))
return u2, w2
def do_iterate2(self, ini_refU0=np.zeros(6), ini_refU1=np.ones(6), max_it=1000, tolerate=1e-3):
u0 = ini_refU0[:3]
u1 = ini_refU1[:3]
w0 = ini_refU0[3:]
w1 = ini_refU1[3:]
PETSc.Sys.Print('-->>iterate: %d' % 0)
F_reference, _ = self.solve_sumForce(refU=np.hstack((u1, w0)))
_, T_reference = self.solve_sumForce(refU=np.hstack((u0, w1)))
n_it = 0 # # of iterate
F11, T11 = self.solve_sumForce(np.hstack((u1, w1)))
Ftol = np.abs(F11 / F_reference)
Ttol = np.abs(T11 / T_reference)
PETSc.Sys.Print(
' u1=%s, w1=%s, F11=%s, T11=%s, Ftol=%s, Ttol=%s' % (u1, w1, F11, T11, Ftol, Ttol))
tol = np.hstack((Ftol, Ttol))
while np.any(tol > tolerate) and n_it < max_it:
PETSc.Sys.Print('-->>iterate: %d' % (n_it + 1))
u2, w2 = self.each_iterate2(u0, w0, u1, w1, F11, T11)
F22, T22 = self.solve_sumForce(np.hstack((u2, w2)))
Ftol = np.abs(F22 / F_reference)
Ttol = np.abs(T22 / T_reference)
tol = np.hstack((Ftol, Ttol))
PETSc.Sys.Print(
' u2=%s, w2=%s, F22=%s, T22=%s, Ftol=%s, Ttol=%s' % (
u2, w2, F22, T22, Ftol, Ttol))
u0, u1 = u1, u2
w0, w1 = w1, w2
F11, T11 = F22, T22
n_it = n_it + 1
return np.hstack((u2, w2)), Ftol, Ttol
def do_iterate3(self, ini_refU0=np.zeros(6), ini_refU1=np.ones(6), max_it=100,
rtol=1e-3, atol=1e-10, relax_fct=1):
u0 = ini_refU0[:3]
u1 = ini_refU1[:3]
w0 = ini_refU0[3:]
w1 = ini_refU1[3:]
n_it = 0 # # of iterate
F11, T11 = self.solve_sumForce(np.hstack((u1, w1)))
# max_rel_err = np.max((2 * np.abs(u0 - u1) / (np.abs(u0) + np.abs(u1)),
# 2 * np.abs(w0 - w1) / (np.abs(w0) + np.abs(w1))))
max_rel_err = np.max((np.abs((u0 - u1) / np.linalg.norm(u1)),
np.abs((w0 - w1) / np.linalg.norm(w1))))
max_abs_err = np.max((np.abs(u0 - u1), np.abs(w0 - w1)))
PETSc.Sys.Print(' u0=%s, w0=%s, u1=%s, w1=%s' % (u0, w0, u1, w1))
PETSc.Sys.Print(' max_rel_err=%e, max_abs_err=%e' % (max_rel_err, max_abs_err))
while max_rel_err > rtol and max_abs_err > atol and n_it < max_it:
PETSc.Sys.Print('-->>iterate: %d' % (n_it + 1))
u2, w2 = self.each_iterate2(u0, w0, u1, w1, F11, T11, relax_fct=relax_fct)
F22, T22 = self.solve_sumForce(np.hstack((u2, w2)))
u0, u1 = u1, u2
w0, w1 = w1, w2
F11, T11 = F22, T22
n_it = n_it + 1
# max_rel_err = np.max((2 * np.abs(u0 - u1) / (np.linalg.norm(u0) + np.linalg.norm(u1)),
# 2 * np.abs(w0 - w1) / (np.linalg.norm(w0) + np.linalg.norm(w1))))
max_rel_err = np.max((np.abs((u0 - u1) / np.linalg.norm(u1)),
np.abs((w0 - w1) / np.linalg.norm(w1))))
max_abs_err = np.max((np.abs(u0 - u1), np.abs(w0 - w1)))
PETSc.Sys.Print(' u2=%s, w2=%s' % (u2, w2))
PETSc.Sys.Print(' max_rel_err=%e, max_abs_err=%e' % (max_rel_err, max_abs_err))
# PETSc.Sys.Print(' DBG', max_rel_err > rtol, max_abs_err > atol, n_it < max_it)
refU = np.hstack((u1, w1))
self._iterComp.set_ref_U(refU)
return np.hstack((u1, w1))
def set_force_free(self):
pass
class ForceFree1DInfProblem(ForceFreeProblem):
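    """
    Force free problem with a symmetry axis (x, y or z): only the force and
    torque components along that axis are constrained, so each composite
    carries just two extra unknowns (axial translation and rotation).
    """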
def _init_kwargs(self, axis='z', **kwargs):
# axis: symmetrical axis
if axis == 'x':
ffweightF = kwargs['ffweightx'] / kwargs['zoom_factor']
elif axis == 'y':
ffweightF = kwargs['ffweighty'] / kwargs['zoom_factor']
elif axis == 'z':
ffweightF = kwargs['ffweightz'] / kwargs['zoom_factor']
else:
err_msg = 'wrong symmetrical axis, it should be one of (x, y, z). '
raise ValueError(err_msg)
ffweightT = kwargs['ffweightT'] / kwargs['zoom_factor']
self._ffweigth = np.array([ffweightF, ffweightT ** 2])
PETSc.Sys.Print(
'-->absolute force free weight of 1D symmetrical problem is %s ' % self._ffweigth)
return True
def __init__(self, axis='z', **kwargs):
super().__init__(**kwargs)
self._axis = axis # symmetrical axis
def get_axis(self):
return self._axis
# def _create_U(self):
# comm = PETSc.COMM_WORLD.tompi4py()
# rank = comm.Get_rank()
# velocity = self._u_pkg.createGlobalVector()
# velocity.zeroEntries()
# for obj0 in self.get_obj_list():
# if isinstance(obj0, forcefreeComposite):
# center = obj0.get_center()
# for sub_obj, rel_U in zip(obj0.get_obj_list(), obj0.get_rel_U_list()):
# sub_nodes = sub_obj.get_u_geo().get_nodes()
# # sub_obj.show_velocity(length_factor=0.1, show_nodes=True)
# r = sub_nodes - center
# t_u = (rel_U[:3] + np.cross(rel_U[3:], r)).flatten()
# _, u_glbIdx_all = sub_obj.get_u_geo().get_glbIdx()
# if rank == 0:
# velocity[u_glbIdx_all] = t_u[:]
# else:
# u0 = obj0.get_velocity()
# _, u_glbIdx_all = obj0.get_u_geo().get_glbIdx()
# if rank == 0:
# velocity[u_glbIdx_all] = u0[:]
# velocity.assemble()
# self._velocity_petsc = velocity
# return True
# def _set_glbIdx(self):
# # global index
# f_isglb = self._f_pkg.getGlobalISs()
# u_isglb = self._u_pkg.getGlobalISs()
# for obj0 in self.get_obj_list():
# if isinstance(obj0, forcefreeComposite):
# for sub_obj in obj0.get_obj_list():
# t_f_isglb = f_isglb.pop(0)
# t_u_isglb = u_isglb.pop(0)
# sub_obj.get_f_geo().set_glbIdx(t_f_isglb.getIndices())
# sub_obj.get_u_geo().set_glbIdx(t_u_isglb.getIndices())
# t_f_isglb = f_isglb.pop(0) # force free additional degree of freedomes
# t_u_isglb = u_isglb.pop(0) # velocity free additional degree of freedomes
# obj0.set_f_glbIdx(t_f_isglb.getIndices())
# obj0.set_u_glbIdx(t_u_isglb.getIndices())
# else:
# t_f_isglb = f_isglb.pop(0)
# t_u_isglb = u_isglb.pop(0)
# obj0.get_f_geo().set_glbIdx(t_f_isglb.getIndices())
# obj0.get_u_geo().set_glbIdx(t_u_isglb.getIndices())
# return True
def set_force_free(self):
import numpy.matlib as npm
ffweight = self._ffweigth
err_msg = 'self._M_petsc is NOT assembled'
assert self._M_petsc.isAssembled(), err_msg
for obj1 in self.get_obj_list():
if isinstance(obj1, ForceFreeComposite):
center = obj1.get_center()
_, u_glbIdx_all = obj1.get_u_glbIdx()
_, f_glbIdx_all = obj1.get_f_glbIdx()
# self._M_petsc.zeroRows(u_glbIdx_all)
# self._M_petsc.setValues(u_glbIdx_all, range(f_size), np.zeros(f_size), addv=False)
# self._M_petsc.setValues(range(u_size), f_glbIdx_all, np.zeros(u_size), addv=False)
for sub_obj in obj1.get_obj_list():
r_u = sub_obj.get_u_geo().get_nodes() - center
r_f = sub_obj.get_f_geo().get_nodes() - center
axis = self.get_axis()
if axis == 'x':
t_I = np.array((-ffweight[0], 0, 0))
tmu2 = np.vstack([(0, -ri[2], ri[1]) for ri in r_u]) * ffweight[1]
tmf2 = np.hstack([(0, -ri[2], ri[1]) for ri in r_f]) * ffweight[1]
elif axis == 'y':
t_I = np.array((0, -ffweight[0], 0))
tmu2 = np.vstack([(ri[2], 0, -ri[0]) for ri in r_u]) * ffweight[1]
tmf2 = np.hstack([(ri[2], 0, -ri[0]) for ri in r_f]) * ffweight[1]
elif axis == 'z':
t_I = np.array((0, 0, -ffweight[0]))
tmu2 = np.vstack([(-ri[1], ri[0], 0) for ri in r_u]) * ffweight[1]
tmf2 = np.hstack([(-ri[1], ri[0], 0) for ri in r_f]) * ffweight[1]
tmu1 = npm.repmat(t_I, sub_obj.get_n_u_node(), 1)
tmf1 = npm.repmat(t_I, 1, sub_obj.get_n_f_node())
tmu = np.dstack((tmu1.flatten(), tmu2.flatten()))[0]
tmf = np.vstack((tmf1, tmf2))
_, sub_u_glbIdx_all = sub_obj.get_u_geo().get_glbIdx()
_, sub_f_glbIdx_all = sub_obj.get_f_geo().get_glbIdx()
self._M_petsc.setValues(sub_u_glbIdx_all, f_glbIdx_all, tmu, addv=False)
self._M_petsc.setValues(u_glbIdx_all, sub_f_glbIdx_all, tmf, addv=False)
# # dbg
# PETSc.Sys.Print(sub_u_glbIdx_all, f_glbIdx_all)
self._M_petsc.assemble()
return True
# def create_matrix(self):
# t0 = time()
# self.create_F_U()
#
# # create matrix
# # 1. setup matrix
# if not self._M_petsc.isAssembled():
# self.create_empty_M()
# # 2. set mij part of matrix
# # cmbd_ugeo = geo( )
# # cmbd_ugeo.combine([obj.get_u_geo( ) for obj in self.get_all_obj_list( )])
# # cmbd_ugeo.set_glbIdx_all(np.hstack([obj.get_u_geo( ).get_glbIdx( )[1] for obj in self.get_all_obj_list( )]))
# # cmbd_obj = StokesFlowObj( )
# # cmbd_obj.set_data(cmbd_ugeo, cmbd_ugeo)
# # self._create_matrix_obj(cmbd_obj, self._M_petsc)
# n_obj = len(self.get_all_obj_list())
# for i0, obj1 in enumerate(self.get_all_obj_list()):
# INDEX = ' %d/%d' % (i0 + 1, n_obj)
# self._create_matrix_obj(obj1, self._M_petsc, INDEX)
# # 3. set force and torque free part of matrix
# self.set_force_free()
# # self._M_petsc.view()
#
# t1 = time()
# PETSc.Sys.Print(' %s: create matrix use: %fs' % (str(self), (t1 - t0)))
# return True
def _solve_force(self, ksp):
kwargs = self._kwargs
getConvergenceHistory = kwargs['getConvergenceHistory']
ffweight = self._ffweigth
axis = self.get_axis()
if getConvergenceHistory:
ksp.setConvergenceHistory()
ksp.solve(self._velocity_petsc, self._force_petsc)
self._convergenceHistory = ksp.getConvergenceHistory()
else:
ksp.solve(self._velocity_petsc, self._force_petsc)
# # dbg
# re_velocity_petsc = self._M_petsc.createVecLeft()
# self._M_petsc.mult(self._force_petsc, re_velocity_petsc)
t_force = self.vec_scatter(self._force_petsc, destroy=False)
tmp = []
for obj0 in self.get_obj_list():
if isinstance(obj0, ForceFreeComposite):
center = obj0.get_center()
for sub_obj in obj0.get_obj_list():
_, f_glbIdx_all = sub_obj.get_f_geo().get_glbIdx()
sub_obj.set_force(t_force[f_glbIdx_all])
tmp.append(t_force[f_glbIdx_all])
_, f_glbIdx_all = obj0.get_f_glbIdx()
ref_U = t_force[f_glbIdx_all] * ffweight
if axis == 'x':
ref_U = np.array([ref_U[0], 0, 0, ref_U[1], 0, 0])
elif axis == 'y':
ref_U = np.array([0, ref_U[0], 0, 0, ref_U[1], 0])
elif axis == 'z':
ref_U = np.array([0, 0, ref_U[0], 0, 0, ref_U[1]])
obj0.set_ref_U(ref_U)
# absolute speed
for sub_obj, rel_U in zip(obj0.get_obj_list(), obj0.get_rel_U_list()):
abs_U = ref_U + rel_U
sub_obj.get_u_geo().set_rigid_velocity(abs_U, center=center)
else:
_, f_glbIdx_all = obj0.get_f_geo().get_glbIdx()
obj0.set_force(t_force[f_glbIdx_all])
tmp.append(t_force[f_glbIdx_all])
self._force = np.hstack(tmp)
return True
def _resolve_velocity(self, ksp):
ffweight = self._ffweigth
re_velocity_petsc = self._M_petsc.createVecLeft()
# re_velocity_petsc.set(0)
self._M_petsc.mult(self._force_petsc, re_velocity_petsc)
self._re_velocity = self.vec_scatter(re_velocity_petsc)
axis = self.get_axis()
for obj0 in self.get_obj_list():
if isinstance(obj0, ForceFreeComposite):
ref_U = obj0.get_ref_U()
center = obj0.get_center()
for sub_obj in obj0.get_obj_list():
_, u_glbIdx_all = sub_obj.get_u_geo().get_glbIdx()
re_rel_U = self._re_velocity[u_glbIdx_all]
sub_nodes = sub_obj.get_u_geo().get_nodes()
r = sub_nodes - center
t_u = (ref_U[:3] + np.cross(ref_U[3:], r)).flatten()
re_abs_U = t_u + re_rel_U
sub_obj.set_re_velocity(re_abs_U)
# _, u_glbIdx_all = obj0.get_u_glbIdx()
# re_sum = self._re_velocity[u_glbIdx_all] * [-1, 1] / ffweight
# if axis == 'x':
# re_sum = np.array([re_sum[0], 0, 0, re_sum[1], 0, 0])
# elif axis == 'y':
# re_sum = np.array([0, re_sum[0], 0, 0, re_sum[1], 0])
# elif axis == 'z':
# re_sum = np.array([0, 0, re_sum[0], 0, 0, re_sum[1]])
# obj0.set_total_force(re_sum) # force free, analytically they are zero.
else:
_, u_glbIdx_all = obj0.get_u_geo().get_glbIdx()
obj0.set_re_velocity(self._re_velocity[u_glbIdx_all])
self._finish_solve = True
return ksp.getResidualNorm()
class GivenForceProblem(ForceFreeProblem):
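    """
    Like ForceFreeProblem, but the net force and torque acting on the composite
    are prescribed (givenF) rather than zero: the right hand side rows attached
    to the composite are filled with givenF * ffweight instead of zeros.
    """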
def _create_U(self):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
ffweight = self._ffweigth
velocity = self._u_pkg.createGlobalVector()
velocity.zeroEntries()
for obj0 in self.get_obj_list():
if isinstance(obj0, GivenForceComposite):
center = obj0.get_center()
for sub_obj, rel_U in zip(obj0.get_obj_list(), obj0.get_rel_U_list()):
sub_nodes = sub_obj.get_u_geo().get_nodes()
# sub_obj.show_velocity(length_factor=0.1, show_nodes=True)
r = sub_nodes - center
t_u = (rel_U[:3] + np.cross(rel_U[3:], r)).flatten()
_, u_glbIdx_all = sub_obj.get_u_geo().get_glbIdx()
if rank == 0:
velocity[u_glbIdx_all] = t_u[:]
_, u_glbIdx_all = obj0.get_u_glbIdx()
givenF = obj0.get_givenF() * (
[-1] * 3 + [1] * 3) # sum(-1*F)=-F_give, sum(r*F)=T_give
if rank == 0:
velocity[u_glbIdx_all] = givenF * ffweight
else:
u0 = obj0.get_velocity()
_, u_glbIdx_all = obj0.get_u_geo().get_glbIdx()
if rank == 0:
velocity[u_glbIdx_all] = u0[:]
velocity.assemble()
self._velocity_petsc = velocity
return True
class givenForce1DInfPoblem(ForceFree1DInfProblem, GivenForceProblem):
def _create_U(self):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
ffweight = self._ffweigth
velocity = self._u_pkg.createGlobalVector()
velocity.zeroEntries()
for obj0 in self.get_obj_list():
if isinstance(obj0, GivenForce1DInfComposite):
center = obj0.get_center()
for sub_obj, rel_U in zip(obj0.get_obj_list(), obj0.get_rel_U_list()):
sub_nodes = sub_obj.get_u_geo().get_nodes()
# sub_obj.show_velocity(length_factor=0.1, show_nodes=True)
r = sub_nodes - center
t_u = (rel_U[:3] + np.cross(rel_U[3:], r)).flatten()
_, u_glbIdx_all = sub_obj.get_u_geo().get_glbIdx()
if rank == 0:
velocity[u_glbIdx_all] = t_u[:]
_, u_glbIdx_all = obj0.get_u_glbIdx()
givenF = obj0.get_givenF() * (
[-1] * 3 + [1] * 3) # sum(-1*F)=-F_give, sum(r*F)=T_give
axis = self.get_axis()
if rank == 0:
if axis == 'x':
velocity[u_glbIdx_all] = givenF[[0, 3]] * ffweight
elif axis == 'y':
velocity[u_glbIdx_all] = givenF[[1, 4]] * ffweight
elif axis == 'z':
velocity[u_glbIdx_all] = givenF[[2, 5]] * ffweight
else:
u0 = obj0.get_velocity()
_, u_glbIdx_all = obj0.get_u_geo().get_glbIdx()
if rank == 0:
velocity[u_glbIdx_all] = u0[:]
velocity.assemble()
self._velocity_petsc = velocity
return True
class StokesletsInPipeforcefreeProblem(StokesletsInPipeProblem, ForceFreeProblem):
def _nothing(self):
pass
class StokesletsInPipeforcefreeIterateProblem(StokesletsInPipeProblem, ForceFreeIterateProblem):
def _nothing(self):
pass
class StokesletsTwoPlaneProblem(StokesFlowProblem):
    # Stokes flow between two planes, one of which moves at a constant velocity.
    # U_all = U_shear + U_twoPlane.
    # Both planes are parallel to the XY plane; the shear flow is perpendicular to the z axis.
    # see Liron, Nadav, and S. Mochon. "Stokes flow for a stokeslet between two parallel flat plates." Journal of Engineering Mathematics 10.4 (1976): 287-303.
def _init_kwargs(self, **kwargs):
self._twoPlateHeight = kwargs['twoPlateHeight']
return True
def _check_add_obj(self, obj):
h = self._twoPlateHeight
nodes = obj.get_u_geo().get_nodes()
err_msg = 'z coordinate of nodes is out of range (%f, %f)' % (0, h)
assert np.all(nodes[:, 2] < h) and np.all(nodes[:, 2] > 0), err_msg
return True
class _GivenFlowProblem(StokesFlowProblem):
    # assuming the problem has a given background flow: subtract it, solve, then add it back.
def get_given_flow(self, obj):
given_u = np.zeros(obj.get_n_u_node() * obj.get_n_unknown())
for obj2 in self.get_all_obj_list():
if isinstance(obj2, FundSoltObj):
for location, force, StokesletsHandle in obj2.get_point_force_list():
# subtract the velocity due to the force at obj1
m_f2 = StokesletsHandle(obj.get_u_nodes(), location)
given_u = given_u + np.dot(m_f2, force)
return given_u
def get_given_flow_at(self, location):
# from src.geo import geo
temp_geo1 = base_geo() # velocity nodes
temp_geo1.set_nodes(location, deltalength=0)
temp_obj1 = StokesFlowObj()
temp_obj1.set_data(temp_geo1, temp_geo1, np.zeros(location.size))
return self.get_given_flow(temp_obj1)
def subtract_given_flow_obj(self, obj):
given_u = self.get_given_flow(obj)
ugeo = obj.get_u_geo()
ugeo.set_velocity(ugeo.get_velocity() - given_u)
return True
def add_given_flow_obj(self, obj):
given_u = self.get_given_flow(obj)
ugeo = obj.get_u_geo()
ugeo.set_velocity(ugeo.get_velocity() + given_u)
return True
def _create_U(self):
for obj0 in self.get_all_obj_list():
self.subtract_given_flow_obj(obj0)
super(_GivenFlowProblem, self)._create_U()
for obj0 in self.get_all_obj_list():
self.add_given_flow_obj(obj0)
return True
def _resolve_velocity(self, ksp):
ksp_norm = super()._resolve_velocity(ksp)
for obj0 in self.get_all_obj_list():
given_u = self.get_given_flow(obj0)
obj0.set_re_velocity(obj0.get_re_velocity() + given_u)
return ksp_norm
def solve_obj_u(self, obj: 'StokesFlowObj', INDEX=''):
obj_u = super().solve_obj_u(obj, INDEX)
given_u = self.get_given_flow(obj)
return obj_u + given_u
def update_location(self, eval_dt, print_handle=''):
super().update_location(eval_dt, print_handle)
self.create_F_U()
return True
class StrainRateBaseProblem(_GivenFlowProblem):
def _init_kwargs(self, **kwargs):
super()._init_kwargs(**kwargs)
self._basei = 0
self.set_basei(kwargs['basei'])
return True
def base_fun(self):
# u=(z, 0, 0)
def base0(unodes):
n_nodes = unodes.shape[0]
u = np.vstack((unodes[:, 2], np.zeros(n_nodes), np.zeros(n_nodes))).T.flatten()
return u
# u=(x, -y, 0)
def base1(unodes):
n_nodes = unodes.shape[0]
u = np.vstack((unodes[:, 0], -1 * unodes[:, 1], np.zeros(n_nodes))).T.flatten()
return u
# u=(0, -y, z)
def base2(unodes):
n_nodes = unodes.shape[0]
u = np.vstack((np.zeros(n_nodes), -1 * unodes[:, 1], unodes[:, 2])).T.flatten()
return u
# u=(y, x, 0)
def base3(unodes):
n_nodes = unodes.shape[0]
u = np.vstack((unodes[:, 1], unodes[:, 0], np.zeros(n_nodes))).T.flatten()
return u
# u=(z, 0, x)
def base4(unodes):
n_nodes = unodes.shape[0]
u = np.vstack((unodes[:, 2], np.zeros(n_nodes), unodes[:, 0])).T.flatten()
return u
# u=(0, z, y)
def base5(unodes):
n_nodes = unodes.shape[0]
u = np.vstack((np.zeros(n_nodes), unodes[:, 2], unodes[:, 1])).T.flatten()
return u
# u=(0, -z, y)
def base6(unodes):
n_nodes = unodes.shape[0]
u = np.vstack((np.zeros(n_nodes), -1 * unodes[:, 2], unodes[:, 1])).T.flatten()
return u
# u=(z, 0, -x)
def base7(unodes):
n_nodes = unodes.shape[0]
u = np.vstack((unodes[:, 2], np.zeros(n_nodes), -1 * unodes[:, 0])).T.flatten()
return u
# u=(-y, x, 0)
def base8(unodes):
n_nodes = unodes.shape[0]
u = np.vstack((-1 * unodes[:, 1], unodes[:, 0], np.zeros(n_nodes))).T.flatten()
return u
# u=(0, 0, 0)
def base9(unodes):
n_nodes = unodes.shape[0]
u = np.vstack((np.zeros(n_nodes), np.zeros(n_nodes), np.zeros(n_nodes))).T.flatten()
return u
# u=(0, 0, 0)
def base10(unodes):
n_nodes = unodes.shape[0]
u = np.vstack((np.zeros(n_nodes), np.ones(n_nodes), np.zeros(n_nodes))).T.flatten()
return u
_base_fun = {0: base0,
1: base1,
2: base2,
3: base3,
4: base4,
5: base5,
6: base6,
7: base7,
8: base8,
9: base9,
10: base10, }
return _base_fun
def get_given_flow(self, obj):
basei = self.get_basei()
given_u = super().get_given_flow(obj)
given_u = given_u + self.base_fun()[basei](obj.get_u_nodes())
return given_u
def set_basei(self, basei):
base_keys = list(self.base_fun().keys())
assert basei in base_keys
self._basei = basei
return True
def get_basei(self):
return self._basei
class ShearFlowProblem(_GivenFlowProblem):
def _init_kwargs(self, **kwargs):
super()._init_kwargs(**kwargs)
self._planeShearRate = np.zeros(3)
self._planeShearNorm = kwargs['planeShearNorm']
self.set_planeShearRate(kwargs['planeShearRate'])
return True
def get_given_flow(self, obj):
given_u = super().get_given_flow(obj)
# in this case, background flow is a given shear flow
planeShearRate = self._planeShearRate # for shear flow
planeShearNorm = self._planeShearNorm
ugeo = obj.get_u_geo()
t1 = np.einsum('ij,j', ugeo.get_nodes(), planeShearNorm)
given_u = given_u + np.einsum('i, j->ij', t1, planeShearRate.flatten()).ravel()
return given_u
def set_planeShearRate(self, planeShearRate):
planeShearNorm = self._planeShearNorm
self._planeShearRate = np.array(planeShearRate).ravel()
# err_msg = 'shear flow velocity is must vertical to z axis. '
# assert self._planeShearRate[0, -1] == 0. and self._planeShearRate.size == 3, err_msg
err_msg = 'restriction: dot(planeShearRate, planeShearNorm)==0. '
assert np.isclose(np.dot(planeShearRate, planeShearNorm), 0), err_msg
return True
def get_planeShearRate(self):
return self._planeShearRate
class doubleletProblem(_GivenFlowProblem):
def _init_kwargs(self, **kwargs):
super()._init_kwargs(**kwargs)
self._doublelet = np.zeros(3)
self.set_doublelet(kwargs['doublelet'])
return True
def get_given_flow(self, obj):
given_u = super().get_given_flow(obj)
        # in this case, the background flow is a linear profile weighted by the
        # doublelet strength: u(x) = z * doublelet
        u_weight = self._doublelet  # set via set_doublelet(); shape (1, 3)
ugeo = obj.get_u_geo()
given_u = given_u + np.dot(ugeo.get_nodes()[:, 2].reshape((-1, 1)), u_weight).flatten()
return given_u
def set_doublelet(self, doublelet):
self._doublelet = np.array(doublelet).reshape((1, 3))
return True
def get_doublelet(self):
return self._doublelet
class FreeVortexProblem(_GivenFlowProblem):
# assume vortex is in XY plane
def _init_kwargs(self, **kwargs):
super()._init_kwargs(**kwargs)
self._vortexStrength = kwargs['vortexStrength']
return True
def get_given_flow(self, obj):
given_u = super().get_given_flow(obj)
# in this case, background flow is a given Free Vortex flow
phi, rho, z = obj.get_u_geo().get_polar_coord()
u_phi = self._vortexStrength / (2 * np.pi * rho)
given_u = given_u + np.dstack((-u_phi * np.sin(phi),
u_phi * np.cos(phi),
np.zeros_like(phi))).flatten()
return given_u
def get_vortexStrength(self):
return self._vortexStrength
class LambOseenVortexProblem(_GivenFlowProblem):
# assume vortex is in XY plane
def _init_kwargs(self, **kwargs):
super()._init_kwargs(**kwargs)
self._vortexStrength = kwargs['vortexStrength']
return True
def get_given_flow(self, obj):
given_u = super().get_given_flow(obj)
        # in this case, the background flow is a given Lamb-Oseen vortex flow
phi, rho, z = obj.get_u_geo().get_polar_coord()
u_phi = self._vortexStrength / (2 * np.pi * rho) * (1 - np.exp(-rho ** 2 / 4))
# u_phi = self._vortexStrength / (2 * np.pi * rho) * (1 - np.exp(-rho ** 2))
given_u = given_u + np.dstack((-u_phi * np.sin(phi),
u_phi * np.cos(phi),
np.zeros_like(phi))).flatten()
return given_u
def set_vortexStrength(self, vortexStrength):
self._vortexStrength = vortexStrength
return True
def get_vortexStrength(self):
return self._vortexStrength
class StokesletsFlowProblem(_GivenFlowProblem):
def _init_kwargs(self, **kwargs):
super()._init_kwargs(**kwargs)
self._StokesletsStrength = np.array(kwargs['StokesletsStrength']).reshape((1, 3)).flatten()
return True
def get_given_flow(self, obj):
given_u = super().get_given_flow(obj)
        # in this case, the background flow is induced by a point force (Stokeslet) at the origin
given_u_fun = lambda x0, x1, x2, f0, f1, f2: np.array(
[f0 * x0 ** 2 * (x0 ** 2 + x1 ** 2 + x2 ** 2) ** (-1.5) +
f0 * (x0 ** 2 + x1 ** 2 + x2 ** 2) ** (-0.5) +
f1 * x0 * x1 * (x0 ** 2 + x1 ** 2 + x2 ** 2) ** (-1.5) +
f2 * x0 * x2 * (x0 ** 2 + x1 ** 2 + x2 ** 2) ** (-1.5),
f0 * x0 * x1 * (x0 ** 2 + x1 ** 2 + x2 ** 2) ** (-1.5) +
f1 * x1 ** 2 * (x0 ** 2 + x1 ** 2 + x2 ** 2) ** (-1.5) +
f1 * (x0 ** 2 + x1 ** 2 + x2 ** 2) ** (-0.5) +
f2 * x1 * x2 * (x0 ** 2 + x1 ** 2 + x2 ** 2) ** (-1.5),
f0 * x0 * x2 * (x0 ** 2 + x1 ** 2 + x2 ** 2) ** (-1.5) +
f1 * x1 * x2 * (x0 ** 2 + x1 ** 2 + x2 ** 2) ** (-1.5) +
f2 * x2 ** 2 * (x0 ** 2 + x1 ** 2 + x2 ** 2) ** (-1.5) +
f2 * (x0 ** 2 + x1 ** 2 + x2 ** 2) ** (-0.5)]) / (8 * np.pi)
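        # the lambda above is the free space Stokeslet (Oseen tensor) velocity,
        #   u_i(x) = (delta_ij / |x| + x_i x_j / |x|**3) * f_j / (8 * pi),
        # evaluated at the velocity nodes, i.e. a point force at the origin with
        # the viscosity scaled to one.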
unodes = obj.get_u_geo().get_nodes()
given_u = given_u + given_u_fun(*unodes, *self._StokesletsStrength)
return given_u
class PoiseuilleFlowProblem(_GivenFlowProblem):
def _init_kwargs(self, **kwargs):
super()._init_kwargs(**kwargs)
self._PoiseuilleStrength = kwargs['PoiseuilleStrength']
return True
def get_given_flow(self, obj):
given_u = super().get_given_flow(obj)
# in this case, background flow is a given Poiseuille flow
u_weight = self._PoiseuilleStrength # for Poiseuille flow
tgeo = obj.get_u_geo()
_, rho, _ = tgeo.get_polar_coord()
given_z = (1 - rho ** 2) * u_weight
given_u = given_u + np.dstack((np.zeros_like(given_z), np.zeros_like(given_z),
given_z,)).flatten()
return given_u
class FundSoltObj(StokesFlowObj):
def __init__(self):
super(FundSoltObj, self).__init__()
# each element contain two vectors and a type ((x1,2,3), (f1,2,3), StokesletsHandle)
self._point_force_list = []
        # the following property stores the location history of each point force.
        self._force_norm_hist = []  # [[location hist of force1], [location hist of force2], ...]
def add_point_force(self, location: np.ndarray, force: np.ndarray,
StokesletsHandle=light_stokeslets_matrix_3d):
        err_msg = 'both location and force are vectors with shape (3,)'
assert location.shape == (3,) and force.shape == (3,), err_msg
self._point_force_list.append((location, force, StokesletsHandle))
self._force_norm_hist.append([])
return True
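    # e.g. obj.add_point_force(np.array((0., 0., 1.)), np.array((0., 0., -1.)))
    # attaches a point force of strength (0, 0, -1) at location (0, 0, 1) using
    # the default free space Stokeslets handle.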
def get_point_force_list(self):
return self._point_force_list
def dbg_set_point_force_list(self, point_force_list):
self._point_force_list = point_force_list
return True
def move(self, displacement):
super(FundSoltObj, self).move(displacement)
t_list = []
for location, force, StokesletsHandle in self.get_point_force_list():
location = location + displacement
t_list.append((location, force, StokesletsHandle))
self._point_force_list = t_list
return True
def node_rotation(self, norm=np.array([0, 0, 1]), theta=0, rotation_origin=None):
# The rotation is counterclockwise
if rotation_origin is None:
rotation_origin = self.get_u_geo().get_origin()
else:
rotation_origin = np.array(rotation_origin).reshape((3,))
super(FundSoltObj, self).node_rotation(norm=norm, theta=theta,
rotation_origin=rotation_origin)
rot_mtx = get_rot_matrix(norm=norm, theta=theta)
t_list = []
for location0, force0, StokesletsHandle in self.get_point_force_list():
location = np.dot(rot_mtx, (location0 - rotation_origin)) + rotation_origin
force = np.dot(rot_mtx, (force0 + location0 - rotation_origin)) \
+ rotation_origin - location
t_list.append((location, force, StokesletsHandle))
self._point_force_list = t_list
return True
def update_location(self, eval_dt, print_handle=''):
super(FundSoltObj, self).update_location(eval_dt, print_handle)
for (location, _, _), t_hist in zip(self.get_point_force_list(), self._force_norm_hist):
t_hist.append(location)
return True
def get_force_norm_hist(self):
return self._force_norm_hist
class _GivenFlowForceFreeProblem(_GivenFlowProblem, ForceFreeProblem):
def _create_U(self):
# u_fi: velocity due to point force on the boundary, unknown;
# u_ref, w_ref: reference velocity of the composite, rigid body motion, unknown;
# u_rel, w_rel: relative velocity of each part, rigid body motion, known;
# u_bi: background flow velocity, known;
# u_ti: total velocity that keeps rigid body motion on the surfaces of each part.
# u_fi + u_bi = u_ti = u_ref + w_ref × ri + u_rel + w_rel × ri .
# u_fi - u_ref - w_ref × ri = u_rel + w_rel × ri - u_bi
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
velocity = self._u_pkg.createGlobalVector()
velocity.zeroEntries()
for obj0 in self.get_obj_list():
if isinstance(obj0, ForceFreeComposite):
center = obj0.get_center()
for sub_obj, rel_U in zip(obj0.get_obj_list(), obj0.get_rel_U_list()):
sub_nodes = sub_obj.get_u_geo().get_nodes()
# sub_obj.show_velocity(length_factor=0.1, show_nodes=True)
r = sub_nodes - center
t_u = (rel_U[:3] + np.cross(rel_U[3:], r)).flatten()
_, u_glbIdx_all = sub_obj.get_u_geo().get_glbIdx()
givenU = self.get_given_flow(sub_obj)
if rank == 0:
velocity[u_glbIdx_all] = t_u[:] - givenU
# # dbg
# t_list = []
# for sub_obj, rel_U in zip(obj0.get_obj_list(), obj0.get_rel_U_list()):
# givenU = self.get_given_flow(sub_obj)
# t_geo = sub_obj.get_u_geo().copy()
# t_geo.set_velocity(givenU)
# t_list.append(t_geo)
# t_geo2 = geo()
# t_geo2.combine(t_list)
# t_geo2.show_velocity()
else:
u0 = obj0.get_velocity()
_, u_glbIdx_all = obj0.get_u_geo().get_glbIdx()
givenU = self.get_given_flow(obj0)
if rank == 0:
velocity[u_glbIdx_all] = u0[:] - givenU
velocity.assemble()
self._velocity_petsc = velocity
return True
class _GivenFlowForceFreeIterateProblem(_GivenFlowProblem, ForceFreeIterateProblem):
def _create_U(self):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
velocity = self._u_pkg.createGlobalVector()
velocity.zeroEntries()
for obj0 in self.get_obj_list():
if isinstance(obj0, ForceFreeComposite):
center = obj0.get_center()
for sub_obj, rel_U in zip(obj0.get_obj_list(), obj0.get_rel_U_list()):
sub_nodes = sub_obj.get_u_geo().get_nodes()
# sub_obj.show_velocity(length_factor=0.1, show_nodes=True)
r = sub_nodes - center
abs_U = rel_U + obj0.get_ref_U()
t_u = (abs_U[:3] + np.cross(abs_U[3:], r)).flatten()
_, u_glbIdx_all = sub_obj.get_u_geo().get_glbIdx()
givenU = self.get_given_flow(sub_obj)
if rank == 0:
velocity[u_glbIdx_all] = t_u[:] - givenU
else:
u0 = obj0.get_velocity()
_, u_glbIdx_all = obj0.get_u_geo().get_glbIdx()
givenU = self.get_given_flow(obj0)
if rank == 0:
velocity[u_glbIdx_all] = u0[:] - givenU
velocity.assemble()
self._velocity_petsc = velocity
return True
class ShearFlowForceFreeProblem(ShearFlowProblem, _GivenFlowForceFreeProblem):
def _nothing(self):
pass
class ShearFlowForceFreeIterateProblem(ShearFlowProblem, _GivenFlowForceFreeIterateProblem):
def _nothing(self):
pass
class StrainRateBaseForceFreeProblem(StrainRateBaseProblem, _GivenFlowForceFreeProblem):
def _nothing(self):
pass
class StrainRateBaseForceFreeIterateProblem(StrainRateBaseProblem,
_GivenFlowForceFreeIterateProblem):
def _nothing(self):
pass
class FreeVortexForceFreeProblem(FreeVortexProblem, _GivenFlowForceFreeProblem):
def _nothing(self):
pass
class FreeVortexForceFreeIterateProblem(FreeVortexProblem, _GivenFlowForceFreeIterateProblem):
def _nothing(self):
pass
class LambOseenVortexForceFreeProblem(LambOseenVortexProblem, _GivenFlowForceFreeProblem):
def _nothing(self):
pass
class LambOseenVortexForceFreeIterateProblem(LambOseenVortexProblem,
_GivenFlowForceFreeIterateProblem):
def _nothing(self):
pass
class DualPotentialProblem(StokesFlowProblem):
def __init__(self, **kwargs):
super(DualPotentialProblem, self).__init__(**kwargs)
self._n_unknown = 4
def _create_U(self):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
velocity = self._u_pkg.createGlobalVector()
velocity.zeroEntries()
for i0, obj0 in enumerate(self.get_obj_list()):
u0 = np.vstack((obj0.get_velocity().reshape(-1, 3).T,
np.zeros(obj0.get_n_f_node()))).flatten(order='F')
_, u_glbIdx_all = obj0.get_u_geo().get_glbIdx()
if rank == 0:
velocity[u_glbIdx_all] = u0[:]
velocity.assemble()
self._velocity_petsc = velocity
return True
class DualPotentialObj(StokesFlowObj):
def __init__(self):
super(DualPotentialObj, self).__init__()
self._n_unknown = 4
class GivenTorqueIterateVelocity1DProblem(StokesFlowProblem):
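    """
    Iterate the translation velocity of the selected objects along the symmetry
    axis, at a prescribed rotation rate, until the axial component of the total
    force matches the given value (secant iterations, see do_iterate).
    """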
def __init__(self, axis='z', givenF=0, **kwargs):
super().__init__(**kwargs)
err_msg = 'wrong symmetrical axis, it should be one of (x, y, z). '
assert axis in ('x', 'y', 'z'), err_msg
self._axis = axis # symmetrical axis
self._iterObj = []
self._givenF = givenF[2]
def set_iterate_obj(self, obj_list):
        # set the objects whose velocities are varied to reach the force free condition.
        # other objects in the problem have given velocities.
self._iterObj = list(tube_flatten((obj_list,)))
return True
def solve_sumForce(self, U, W=1, center=np.zeros(3)):
axis = self._axis # type: str
# assert 1 == 2, 'check center of the tobj.set_rigid_velocity()'
if axis == 'x':
for tobj in self._iterObj:
tobj.set_rigid_velocity((U, 0, 0, W, 0, 0))
elif axis == 'y':
for tobj in self._iterObj:
tobj.set_rigid_velocity((0, U, 0, 0, W, 0))
elif axis == 'z':
for tobj in self._iterObj:
tobj.set_rigid_velocity((0, 0, U, 0, 0, W))
self.create_F_U()
self.solve()
sum_force = np.sum([tobj.get_total_force(center=center) for tobj in self._iterObj], axis=0)
if axis == 'x':
tf = sum_force[0]
elif axis == 'y':
tf = sum_force[1]
elif axis == 'z':
tf = sum_force[2]
return tf
def each_iterate(self, u0, u1):
f0 = self.solve_sumForce(u0, W=1)
f1 = self.solve_sumForce(u1, W=1)
u2 = (u0 - u1) / (f0 - f1) * (self._givenF - f0) + u0
PETSc.Sys.Print(' u0=%f, u1=%f, f0=%f, f1=%f, u2=%f' % (u0, u1, f0, f1, u2))
return f0, f1, u2
def do_iterate(self, tolerate=1e-3, max_it=1000):
f_reference = self.solve_sumForce(U=1, W=0)
u0 = 0
u1 = 1
tol = tolerate * 100
n_it = 0 # # of iterate
while tol > tolerate and n_it < max_it:
f0, f1, u2 = self.each_iterate(u0, u1)
u0, u1 = u1, u2
tol = np.abs(f1 / f_reference)
n_it = n_it + 1
return u0, tol
class _GivenForceGivenFlowProblem(GivenForceProblem, _GivenFlowProblem):
def _create_U(self):
# u_fi: velocity due to point force on the boundary, unknown;
# u_ref: reference velocity of the composite, rigid body motion, unknown;
# u_rel: relative velocity of each part, rigid body motion, known;
# u_bi: background flow velocity, known;
# u_ti: total velocity that keeps rigid body motion on the surfaces of each part.
# u_fi + u_bi = u_ti = u_ref + u_rel.
# u_fi - u_ref = u_rel - u_bi
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
ffweight = self._ffweigth
velocity = self._u_pkg.createGlobalVector()
velocity.zeroEntries()
for obj0 in self.get_obj_list():
if isinstance(obj0, ForceFreeComposite):
center = obj0.get_center()
for sub_obj, rel_U in zip(obj0.get_obj_list(), obj0.get_rel_U_list()):
sub_nodes = sub_obj.get_u_geo().get_nodes()
u_bi = self.get_given_flow(sub_obj)
# sub_obj.show_velocity(length_factor=0.1, show_nodes=True)
r = sub_nodes - center
t_u = (rel_U[:3] + np.cross(rel_U[3:], r)).flatten() - u_bi
_, u_glbIdx_all = sub_obj.get_u_geo().get_glbIdx()
if rank == 0:
velocity[u_glbIdx_all] = t_u[:]
if isinstance(obj0, GivenForceComposite):
_, u_glbIdx_all = obj0.get_u_glbIdx()
givenF = obj0.get_givenF() * (
[-1] * 3 + [1] * 3) # sum(-1*F)=-F_give, sum(r*F)=T_give
if rank == 0:
velocity[u_glbIdx_all] = givenF * ffweight
else:
u0 = obj0.get_velocity()
u_bi = self.get_given_flow(obj0)
_, u_glbIdx_all = obj0.get_u_geo().get_glbIdx()
if rank == 0:
velocity[u_glbIdx_all] = u0[:] - u_bi
velocity.assemble()
self._velocity_petsc = velocity
return True
def _resolve_velocity(self, ksp):
# ksp_norm = super(GivenForceProblem, self)._resolve_velocity(ksp)
ksp_norm = GivenForceProblem._resolve_velocity(self, ksp)
for obj0 in self.get_all_obj_list():
given_u = self.get_given_flow(obj0)
obj0.set_re_velocity(obj0.get_re_velocity() + given_u)
return ksp_norm
class _GivenTorqueGivenVelocityGivenFlowProblem(GivenForceProblem, _GivenFlowProblem):
def _init_kwargs(self, **kwargs):
StokesFlowProblem._init_kwargs(self, **kwargs)
ffweightT = kwargs['ffweightT'] / kwargs['zoom_factor']
self._ffweigth = np.array([ffweightT ** 2, ffweightT ** 2, ffweightT ** 2])
err_msg = ' # IMPORTANT!!! _ffweigth[0]==_ffweigth[1]==_ffweigth[2]'
assert self._ffweigth[0] == self._ffweigth[1] == self._ffweigth[2], err_msg
PETSc.Sys.Print('-->absolute force free weight %s ' % self._ffweigth)
return True
def _create_U(self):
# u_fi: velocity due to point force on the boundary, unknown;
# u_ref: reference translation velocity of the composite, rigid body motion, unknown;
# w_ref: reference rotation velocity of the composite, rigid body motion, unknown;
# u_rel: relative translation velocity of each part, rigid body motion, known;
# w_rel: relative rotation velocity of each part, rigid body motion, known;
# u_bi: velocity due to background flow, known;
# u_ti: total velocity that keeps rigid body motion on the surfaces of each part.
# ri: location of each point.
# u_fi + u_bi = u_ti = u_ref + w_ref×ri + u_rel + w_rel×ri.
# u_fi + ri×w_ref(=-w_ref×ri) = u_ref + u_rel + w_rel×ri - u_bi
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
ffweight = self._ffweigth
velocity = self._u_pkg.createGlobalVector()
velocity.zeroEntries()
for obj0 in self.get_obj_list():
if isinstance(obj0, GivenTorqueComposite):
center = obj0.get_center()
ref_U = obj0.get_givenU()
for sub_obj, rel_U in zip(obj0.get_obj_list(), obj0.get_rel_U_list()):
sub_nodes = sub_obj.get_u_geo().get_nodes()
u_bi = self.get_given_flow(sub_obj)
# sub_obj.show_velocity(length_factor=0.1, show_nodes=True)
r = sub_nodes - center
t_u = (rel_U[:3] + np.cross(rel_U[3:], r) + ref_U).flatten() - u_bi
_, u_glbIdx_all = sub_obj.get_u_geo().get_glbIdx()
if rank == 0:
velocity[u_glbIdx_all] = t_u[:]
_, u_glbIdx_all = obj0.get_u_glbIdx()
givenT = obj0.get_givenT() # sum(r*F)=T_give
if rank == 0:
velocity[u_glbIdx_all] = givenT * ffweight
else:
u0 = obj0.get_velocity()
u_bi = self.get_given_flow(obj0)
_, u_glbIdx_all = obj0.get_u_geo().get_glbIdx()
if rank == 0:
velocity[u_glbIdx_all] = u0[:] - u_bi
velocity.assemble()
self._velocity_petsc = velocity
return True
def _resolve_velocity(self, ksp):
# self._re_velocity = u_ref + u_rel + w_rel×ri - u_bi
# self._re_velocity + w_ref×ri + u_bi = u_ref + w_ref×ri + u_rel + w_rel×ri
ffweight = self._ffweigth
re_velocity_petsc = self._M_petsc.createVecLeft()
# re_velocity_petsc.set(0)
self._M_petsc.mult(self._force_petsc, re_velocity_petsc)
self._re_velocity = self.vec_scatter(re_velocity_petsc)
for obj0 in self.get_obj_list():
if isinstance(obj0, GivenTorqueComposite):
ref_U = obj0.get_ref_U()
center = obj0.get_center()
# re_sum = 0
for sub_obj in obj0.get_obj_list():
u_b = self.get_given_flow(sub_obj)
_, u_glbIdx_all = sub_obj.get_u_geo().get_glbIdx()
re_rel_U = self._re_velocity[u_glbIdx_all]
sub_nodes = sub_obj.get_u_geo().get_nodes()
r = sub_nodes - center
t_u = (ref_U[:3] + np.cross(ref_U[3:], r)).flatten()
re_abs_U = t_u + re_rel_U + u_b
sub_obj.set_re_velocity(re_abs_U)
# re_sum = re_sum + sub_obj.get_total_force(center=center)
# obj0.set_total_force(re_sum) # torque free, analytically they are zero.
else:
_, u_glbIdx_all = obj0.get_u_geo().get_glbIdx()
u_b = self.get_given_flow(obj0)
obj0.set_re_velocity(self._re_velocity[u_glbIdx_all] + u_b)
self._finish_solve = True
return ksp.getResidualNorm()
def set_force_free(self):
ffweight = self._ffweigth
err_msg = 'self._M_petsc is NOT assembled'
assert self._M_petsc.isAssembled(), err_msg
for obj1 in self.get_obj_list():
if isinstance(obj1, GivenTorqueComposite):
center = obj1.get_center()
_, u_glbIdx_all = obj1.get_u_glbIdx()
_, f_glbIdx_all = obj1.get_f_glbIdx()
# self._M_petsc.zeroRows(u_glbIdx_all)
# self._M_petsc.setValues(u_glbIdx_all, range(f_size), np.zeros(f_size), addv=False)
# self._M_petsc.setValues(range(u_size), f_glbIdx_all, np.zeros(u_size), addv=False)
for sub_obj in obj1.get_obj_list():
r_u = sub_obj.get_u_geo().get_nodes() - center
r_f = sub_obj.get_f_geo().get_nodes() - center
tmu = np.vstack([((0, -ri[2], ri[1]),
(ri[2], 0, -ri[0]),
(-ri[1], ri[0], 0))
for ri in r_u]) * ffweight[0]
tmf = np.hstack([((0, -ri[2], ri[1]),
(ri[2], 0, -ri[0]),
(-ri[1], ri[0], 0))
for ri in r_f]) * ffweight[0]
_, sub_u_glbIdx_all = sub_obj.get_u_geo().get_glbIdx()
_, sub_f_glbIdx_all = sub_obj.get_f_geo().get_glbIdx()
self._M_petsc.setValues(sub_u_glbIdx_all, f_glbIdx_all, tmu, addv=False)
self._M_petsc.setValues(u_glbIdx_all, sub_f_glbIdx_all, tmf, addv=False)
# # dbg
# PETSc.Sys.Print(sub_u_glbIdx_all, f_glbIdx_all)
self._M_petsc.assemble()
return True
class GivenForceShearFlowProblem(_GivenForceGivenFlowProblem, ShearFlowProblem):
def _init_kwargs(self, **kwargs):
GivenForceProblem._init_kwargs(self, **kwargs)
ShearFlowProblem._init_kwargs(self, **kwargs)
# super(GivenForceProblem, self)._init_kwargs(**kwargs)
# super(ShearFlowProblem, self)._init_kwargs(**kwargs)
return True
class GivenForcePoiseuilleFlowProblem(_GivenForceGivenFlowProblem, PoiseuilleFlowProblem):
def _init_kwargs(self, **kwargs):
GivenForceProblem._init_kwargs(self, **kwargs)
PoiseuilleFlowProblem._init_kwargs(self, **kwargs)
# super(GivenForceProblem, self)._init_kwargs(**kwargs)
# super()._init_kwargs(**kwargs)
return True
class GivenTorqueGivenVelocityShearFlowProblem(_GivenTorqueGivenVelocityGivenFlowProblem,
ShearFlowProblem):
def _nothing(self):
pass
def _init_kwargs(self, **kwargs):
_GivenTorqueGivenVelocityGivenFlowProblem._init_kwargs(self, **kwargs)
ShearFlowProblem._init_kwargs(self, **kwargs)
return True
problem_dic = {
'rs': StokesFlowProblem,
'rs_plane': StokesFlowProblem,
'lg_rs': StokesFlowProblem,
'tp_rs': StokesFlowProblem,
'pf': StokesFlowProblem,
'pf_dualPotential': DualPotentialProblem,
'rs_stokesletsInPipe': StokesletsInPipeProblem,
'pf_stokesletsInPipe': StokesletsInPipeProblem,
'pf_stokesletsInPipeforcefree': StokesletsInPipeforcefreeProblem,
'pf_stokesletsTwoPlane': StokesletsTwoPlaneProblem,
'pf_infhelix': StokesFlowProblem,
'pf_selfRepeat': SelfRepeatHlxProblem,
'pf_selfRotate': SelfRotateProblem,
'rs_selfRotate': SelfRotateProblem,
'lg_rs_selfRotate': SelfRotateProblem,
}
obj_dic = {
'rs': StokesFlowObj,
'rs_plane': StokesFlowObj,
'lg_rs': StokesFlowObj,
'tp_rs': StokesFlowObj,
'pf': StokesFlowObj,
'pf_dualPotential': DualPotentialObj,
'rs_stokesletsInPipe': StokesFlowObj,
'pf_stokesletsInPipe': StokesFlowObj,
'pf_stokesletsInPipeforcefree': StokesFlowObj,
'pf_stokesletsTwoPlane': StokesFlowObj,
'pf_infhelix': StokesFlowObj,
'pf_stokesletsRingInPipe': StokesFlowRingObj,
'pf_selfRepeat': SelfRepeatObj,
'pf_selfRotate': SelfRotateObj,
'rs_selfRotate': SelfRotateObj,
'lg_rs_selfRotate': SelfRotateObj,
'KRJ_slb': StokesFlowObj,
'lightill_slb': StokesFlowObj,
}
# names of models that need two geometries.
two_geo_method_list = ('pf', 'ps', 'ps_ds', 'pf_ds',
'pf_stokesletsInPipe', 'pf_stokesletsInPipeforcefree',
'pf_stokesletsTwoPlane', 'pf_infhelix',
'pf_ShearFlow',
'pf_dualPotential',
'pf_stokesletsRingInPipe',)
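# a typical driver is expected to look the classes up by matrix method name, e.g.
# (sketch only; the required keyword arguments depend on the chosen method):
#   matrix_method = 'pf'
#   problem = problem_dic[matrix_method](**problem_kwargs)
#   obj = obj_dic[matrix_method]()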
| mit |
heprom/pymicro | examples/plotting/Au_6grains_pole_figure.py | 1 | 1713 | #!/usr/bin/env python
import os, numpy as np
from pymicro.crystal.microstructure import Microstructure, Grain, Orientation
from pymicro.crystal.texture import PoleFigure
from matplotlib import pyplot as plt, colors, cm
if __name__ == '__main__':
'''
Pole figure of a gold sample containing 6 grains with a strong <111> fiber texture.
A Microstructure object is first created with the 6 grains of interest.
  The grain ids correspond to the actual grain numbers (in an EBSD scan for instance).
A PoleFigure object is then created using this microstructure and the pole figures
(both direct and inverse) are drawn by calling the plot_pole_figures() method.
'''
micro = Microstructure(name='Au_6grains', overwrite_hdf5=True)
micro.autodelete = True
gid_list = [1158, 1349, 1585, 1805, 1833, 2268]
euler_list = [(344.776, 52.2589, 53.9933),
(344.899, 125.961, 217.330),
(228.039, 57.4791, 143.171),
(186.741, 60.333, 43.311),
(151.709, 55.0406, 44.1051),
(237.262, 125.149, 225.615),
]
micro.add_grains(euler_list, grain_ids=gid_list)
# create pole figure (both direct and inverse)
pf = PoleFigure(hkl='111', axis='Z', proj='stereo', microstructure=micro)
pf.mksize = 100
pf.set_map_field('grain_id')
pf.pflegend = True # this works well for a few grains
pf.plot_pole_figures(plot_sst=True, display=False, save_as='png')
del pf
del micro
image_name = os.path.splitext(__file__)[0] + '.png'
print('writing %s' % image_name)
from matplotlib import image
image.thumbnail(image_name, 'thumb_' + image_name, 0.2)
| mit |
awduda/awduda.github.io | index.py | 1 | 2096 | from flask import Flask,url_for,request
from flask import render_template
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql.expression import text
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.dialects.postgresql import JSON
import os
import json
import sklearn
import psycopg2 as pg
import pandas.io.sql as psql
from sklearn import linear_model
from sqlalchemy import create_engine
from model import training,app
@app.route('/')
def main_index():
return render_template('index.html')
@app.route('/classify/')
def classifier():
return render_template('classifier.html')
@app.route('/contribute/')
def contribute():
return render_template('contribute.html')
@app.route('/classify_data/', methods=['POST'])
def classify_data():
if request.method=='POST':
data=json.loads(request.get_json())
red_c_gs=data['red_c_gs']
green_c_gs=data['green_c_gs']
blue_c_gs=data['blue_c_gs']
color_c=data['color_c']
background_type=data['background_type']
        # DATABASE_URL is assumed to be supplied through the environment (Heroku style)
        engine = create_engine(os.environ.get('DATABASE_URL'))
        dataframe = psql.read_sql("SELECT * FROM training", engine)
        x = dataframe['color_c'].values.reshape(-1, 1)
        y = dataframe['num_layers'].values.reshape(-1, 1)
model = linear_model.LinearRegression()
model.fit(x, y)
        # predicted layer count for the submitted colour count (assumed intent)
        prediction = model.predict([[color_c]])
        return json.dumps({'num_layers': float(prediction.ravel()[0])})
@app.route('/add_training/' , methods=['POST', 'GET'])
def training_add():
if request.method == 'POST':
data=json.loads(request.get_json())
red_c_gs=data['red_c_gs']
green_c_gs=data['green_c_gs']
blue_c_gs=data['blue_c_gs']
color_c=data['color_c']
background_type=data['background_type']
num_layers=data['num_layers']
t=training(red_c_gs,green_c_gs,blue_c_gs,color_c,background_type,num_layers)
q=t.add(t)
return("hi")
else:
return("no")
if __name__ == '__main__':
app.debug = True
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
| mit |
JosmanPS/scikit-learn | examples/preprocessing/plot_function_transformer.py | 161 | 1949 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
plt.show()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
s=50,
)
plt.show()
| bsd-3-clause |
sirca/bdkd_datastore | subprojects/laser/data/wsgi/laser_web.py | 1 | 11350 | # Copyright 2015 Nicta
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import hashlib
import os, errno
import csv, json, StringIO
from multiprocessing import Process
from functools import wraps
import urllib
import matplotlib
matplotlib.use('Agg')
from bdkd.laser.data import Dataset
import bdkd.laser.plot as bdkd_plot
from PIL import Image
from flask import ( Flask, request, render_template, send_file,
make_response, abort, redirect)
CACHE_ROOT='/var/tmp'
CACHE_LOCATION='static/cache'
CACHE_SALT='55f329b5b9d620090e763a359e102eb0'
app = Flask(__name__)
def cache_key(*args):
"""
Generate a deterministic, hard-to-guess key for caching.
"""
cache_str = CACHE_SALT + ':'.join(str(x) for x in args)
return hashlib.sha1(cache_str).hexdigest()
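# e.g. cache_key('fft', 'my_repo', 'my_dataset', 3, 7, 0, 5000) is
#   hashlib.sha1(CACHE_SALT + 'fft:my_repo:my_dataset:3:7:0:5000').hexdigest()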
def make_cache_dir(key):
"""
Make cache directory for key. Returns name of the directory.
The cache directory will be the cache root plus the first three characters
of the cache key. This serves to divide up the cache files into manageable
quantities per cache directory.
"""
cache_dirname = os.path.join(CACHE_LOCATION, key[0:3])
try:
os.makedirs(os.path.join(CACHE_ROOT, cache_dirname))
except OSError as e:
if e.errno != errno.EEXIST:
raise
return cache_dirname
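# e.g. a key starting with 'ab1' is cached under <CACHE_ROOT>/static/cache/ab1/,
# which keeps the number of files per cache directory manageable.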
def subprocess_plot(filename, target, args):
if not os.path.exists(filename):
p = Process(target=target, args=args)
p.start()
p.join()
else:
os.utime(filename, None)
def cache_time_series_plot(repository_name, dataset_name, x, y, time_series,
from_time, to_time, z_interval_base):
key = cache_key('time_series', repository_name, dataset_name, x, y,
from_time, to_time)
cache_dirname = make_cache_dir(key)
plot_location = os.path.join(cache_dirname, key + '.png')
plot_filename = os.path.join(CACHE_ROOT, plot_location)
subprocess_plot(filename=plot_filename,
target=bdkd_plot.render_time_series_plot,
args=(time_series, from_time, to_time, z_interval_base,
plot_filename))
return plot_location
def cache_phase_plot(repository_name, dataset_name, x, y,
from_time, to_time, time_series, time_series_selected, delay,
z_interval_base):
key = cache_key('phase', repository_name, dataset_name, x, y,
from_time, to_time, delay)
cache_dirname = make_cache_dir(key)
plot_location = os.path.join(cache_dirname, key + '.png')
plot_filename = os.path.join(CACHE_ROOT, plot_location)
print len(time_series_selected)
print from_time
print to_time
print delay
print z_interval_base
subprocess_plot(filename=plot_filename,
target=bdkd_plot.render_phase_plot,
args=(time_series, from_time, to_time, delay,
z_interval_base, plot_filename))
return plot_location
def cache_fft_plot(repository_name, dataset_name, x, y,
from_time, to_time, time_series, time_series_selected,
z_interval, z_peak_voltage):
key = cache_key('fft', repository_name, dataset_name, x, y,
from_time, to_time)
cache_dirname = make_cache_dir(key)
plot_location = os.path.join(cache_dirname, key + '.png')
plot_filename = os.path.join(CACHE_ROOT, plot_location)
subprocess_plot(filename=plot_filename,
target=bdkd_plot.render_fft_plot,
args=(time_series_selected, z_interval, z_peak_voltage,
plot_filename))
return plot_location
def open_dataset(f):
"""
Wrapper for routes using 'repository_name', 'dataset_name' and 'map_name',
to provide a dataset.
Uses the 'dataset_name' kwarg to open and provide a kwarg called 'dataset'.
If the dataset is not found, aborts with 404.
Furthermore, if a map_name kwarg is provided this is checked for existence
in the dataset. If it does not exist, 404 is returned.
"""
@wraps(f)
def wrapper(*args, **kwargs):
if 'repository_name' in kwargs and 'dataset_name' in kwargs:
try:
kwargs['dataset_name'] = urllib.unquote_plus(
kwargs['dataset_name'])
dataset = Dataset.open(kwargs['repository_name'],
kwargs['dataset_name'])
if not dataset:
abort(404)
except ValueError:
return redirect('/static/unsupported.html')
kwargs['dataset'] = dataset
if 'map_name' in kwargs:
if not kwargs['map_name'] in dataset.get_map_names():
abort(404)
return f(*args, **kwargs)
else:
abort(400)
return wrapper
def open_dataset_and_time_series(f):
"""
Wrapper for loading time series data. Provides 'y', 'x' and
'time_series' to the kwargs.
Uses the @open_dataset decorator to ensure a valid dataset first. Then it
relies on 'y' and 'x' to be provided in the request args
(otherwise 400). If the time series exists, it will be provided in the
kwargs as time_series; otherwise 404.
The 'from_time' and 'to_time' are the times (in picoseconds) of the
selected timeseries -- defaulting to 0 and the last value in the timeseries
(respectively). If provided, these figures will be rounded down and up
resp. to the nearest 50ps interval.
"""
@wraps(f)
@open_dataset
def wrapper(*args, **kwargs):
dataset = kwargs['dataset']
if not 'y' in request.args or not 'x' in request.args:
abort(400)
x = int(request.args.get('x', 0))
y = int(request.args.get('y', 0))
interval = dataset.z_interval_base
time_series = dataset.get_time_series(x, y)
        if time_series is None or len(time_series) == 0:
abort(404)
from_time = int(request.args.get('from', 0))
to_time = int(request.args.get('to', (len(time_series) - 1) * interval))
from_idx = (from_time // interval)
to_idx = -(-to_time // interval)
kwargs['y'] = y
kwargs['x'] = x
kwargs['time_series'] = time_series
kwargs['time_series_selected'] = time_series[from_idx:to_idx]
kwargs['from_time'] = from_idx * interval
kwargs['to_time'] = to_idx * interval
return f(*args, **kwargs)
return wrapper
@app.route("/repositories/<repository_name>/datasets")
def get_datasets(repository_name):
dataset_names = Dataset.list(repository_name, None)
if not dataset_names:
abort(404)
return json.dumps(dataset_names)
@app.route("/repositories/<repository_name>"
"/datasets/<path:dataset_name>/readme")
@open_dataset
def get_readme(repository_name, dataset_name, dataset):
readme_txt = dataset.get_readme()
if readme_txt:
return readme_txt
else:
abort(404)
@app.route("/repositories/<repository_name>"
"/datasets/<path:dataset_name>/map_names")
@open_dataset
def get_map_names(repository_name, dataset_name, dataset):
map_names = dataset.get_map_names(include_variables=False)
return json.dumps(map_names)
@app.route("/repositories/<repository_name>"
"/datasets/<path:dataset_name>"
"/map_data/<map_name>")
@open_dataset
def get_map_data(repository_name, dataset_name, dataset, map_name):
map_data = dataset.get_map_and_variables_data(map_name)
    if map_data is not None:
return json.dumps(map_data)
else:
abort(404)
@app.route("/repositories/<repository_name>"
"/datasets/<path:dataset_name>/time_series_plots")
@open_dataset_and_time_series
def get_time_series_plot(repository_name, dataset_name, dataset, x, y,
from_time, to_time, time_series, time_series_selected):
cache_path = cache_time_series_plot(repository_name, dataset_name,
x, y, time_series_selected, from_time, to_time,
dataset.z_interval_base)
return redirect(cache_path, code=302)
@app.route("/repositories/<repository_name>"
"/datasets/<path:dataset_name>/time_series_data")
@open_dataset_and_time_series
def get_time_series_data(repository_name, dataset_name, dataset, x, y,
from_time, to_time, time_series, time_series_selected):
output = StringIO.StringIO()
output.writelines(["{0}\n".format(str(val)) for val in time_series_selected])
output.seek(0)
return send_file(output,
attachment_filename="X_{0:03d}_Y_{1:03d}_{2}_{3}.csv".format(
x, y, from_time, to_time), mimetype='text/csv',
as_attachment=True)
@app.route("/repositories/<repository_name>"
"/datasets/<path:dataset_name>"
"/phase_plots")
@open_dataset_and_time_series
def get_phase_plot(repository_name, dataset_name, dataset, x, y,
from_time, to_time, time_series, time_series_selected):
delay = int(request.args.get('delay', 1))
print time_series_selected
cache_path = cache_phase_plot(repository_name, dataset_name, x, y,
from_time, to_time, time_series, time_series_selected, delay,
dataset.z_interval_base)
return redirect(cache_path, code=302)
@app.route("/repositories/<repository_name>"
"/datasets/<path:dataset_name>/fft_data")
@open_dataset_and_time_series
def get_fft_data(repository_name, dataset_name, dataset, x, y,
from_time, to_time, time_series, time_series_selected):
(freq, dBm) = bdkd_plot.time_series_fft(time_series_selected,
dataset.z_interval, dataset.z_peak_voltage)
return json.dumps({'fftfreq': freq.tolist(),
'fft_real': dBm.real.data.tolist(),
'fft_imag': dBm.imag.data.tolist() })
@app.route("/repositories/<repository_name>"
"/datasets/<path:dataset_name>/fft_plots")
@open_dataset_and_time_series
def get_fft_plot(repository_name, dataset_name, dataset, x, y,
from_time, to_time, time_series, time_series_selected):
delay = int(request.args.get('delay', 1))
cache_path = cache_fft_plot(repository_name, dataset_name, x, y,
from_time, to_time, time_series, time_series_selected,
dataset.z_interval, dataset.z_peak_voltage)
return redirect(cache_path, code=302)
@app.route("/repositories/<repository_name>"
"/datasets/<path:dataset_name>/")
@open_dataset
def view_dataset(repository_name, dataset_name, dataset):
return render_template('resource.html',
repository_name=repository_name,
dataset_name=dataset_name,
dataset=dataset,
)
@app.route("/")
def index():
return render_template('index.html')
if __name__=="__main__":
# Dev mode: allow Flask to serve the cache from the static directory.
CACHE_ROOT='./'
app.run(host='0.0.0.0', debug = True)
| apache-2.0 |
herilalaina/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 59 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually, in the feature space, a comparison of the
results of two different component analysis techniques:
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 Student's t variables with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
# #############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
# #############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
dingmingliu/quanttrade | quanttrade/bt/algos.py | 1 | 36136 | """
A collection of Algos used to create Strategy logic.
"""
import bt
from bt.core import Algo, AlgoStack
import pandas as pd
import numpy as np
import random
def run_always(f):
"""
Run always decorator to be used with Algo
to ensure stack runs the decorated Algo
on each pass, regardless of failures in the stack.
"""
f.run_always = True
return f
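# Illustrative sketch (assumed usage): mark a function-style algo so an
# AlgoStack keeps calling it on every pass, even when earlier algos in the
# stack have returned False:
#
#     @run_always
#     def log_date(target):
#         print target.now
#         return True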
class PrintDate(Algo):
"""
This Algo simply prints the current date.
Can be useful for debugging purposes.
"""
def __call__(self, target):
print target.now
return True
class PrintTempData(Algo):
"""
This Algo prints the temp data.
Useful for debugging.
"""
def __call__(self, target):
print target.temp
return True
class PrintInfo(Algo):
"""
Prints out info associated with the target strategy. Useful for debugging
purposes.
Args:
* fmt_string (str): A string that will later be formatted with the
target object's __dict__ attribute. Therefore, you should provide
what you want to examine within curly braces ( { } )
Ex:
PrintInfo('Strategy {name} : {now}')
This will print out the name and the date (now) on each call.
Basically, you provide a string that will be formatted with target.__dict__
"""
def __init__(self, fmt_string='{full_name} {now}'):
self.fmt_string = fmt_string
def __call__(self, target):
print self.fmt_string.format(**target.__dict__)
return True
class Debug(Algo):
"""
Utility Algo that calls pdb.set_trace when triggered.
In the debug session, target is available and can be examined.
"""
def __call__(self, target):
import pdb
pdb.set_trace()
return True
class RunOnce(Algo):
"""
Returns True on first run then returns False.
As the name says, the algo only runs once. Useful in situations
where we want to run the logic once (buy and hold for example).
"""
def __init__(self):
super(RunOnce, self).__init__()
self.has_run = False
def __call__(self, target):
# if it hasn't run then we will
# run it and set flag
if not self.has_run:
self.has_run = True
return True
# return false to stop future execution
return False
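# Illustrative sketch (assumed bt API usage; 'data' is a hypothetical price
# DataFrame): a simple buy-and-hold strategy built around RunOnce:
#
#     s = bt.Strategy('buy_and_hold',
#                     [RunOnce(), SelectAll(), WeighEqually(), Rebalance()])
#     res = bt.run(bt.Backtest(s, data))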
class RunDaily(Algo):
"""
Returns True on day change.
Returns True if the target.now's day has changed
since the last run, if not returns False. Useful for
daily rebalancing strategies.
"""
def __init__(self):
super(RunDaily, self).__init__()
self.last_date = None
def __call__(self, target):
# get last date
now = target.now
# if none nothing to do - return false
if now is None:
return False
# create pandas.Timestamp so we can compare calendar dates below
now = pd.Timestamp(now)
if self.last_date is None:
self.last_date = now
return False
result = False
if now.date() != self.last_date.date():
result = True
self.last_date = now
return result
class RunWeekly(Algo):
"""
Returns True on week change.
Returns True if the target.now's week has changed
since the last run, if not returns False. Useful for
weekly rebalancing strategies.
Note:
This algo will typically run on the first day of the
week (assuming we have daily data)
"""
def __init__(self):
super(RunWeekly, self).__init__()
self.last_date = None
def __call__(self, target):
# get last date
now = target.now
# if none nothing to do - return false
if now is None:
return False
# create pandas.Timestamp for useful .week property
now = pd.Timestamp(now)
if self.last_date is None:
self.last_date = now
return False
result = False
if now.week != self.last_date.week:
result = True
self.last_date = now
return result
class RunMonthly(Algo):
"""
Returns True on month change.
Returns True if the target.now's month has changed
since the last run, if not returns False. Useful for
monthly rebalancing strategies.
Note:
This algo will typically run on the first day of the
month (assuming we have daily data)
"""
def __init__(self):
super(RunMonthly, self).__init__()
self.last_date = None
def __call__(self, target):
# get last date
now = target.now
# if none nothing to do - return false
if now is None:
return False
if self.last_date is None:
self.last_date = now
return False
result = False
if now.month != self.last_date.month:
result = True
self.last_date = now
return result
class RunQuarterly(Algo):
"""
Returns True on quarter change.
Returns True if the target.now's month has changed
since the last run and the month is the first month
of the quarter, if not returns False. Useful for
quarterly rebalancing strategies.
Note:
This algo will typically run on the first day of the
quarter (assuming we have daily data)
"""
def __init__(self):
super(RunQuarterly, self).__init__()
self.last_date = None
def __call__(self, target):
# get last date
now = target.now
# if none nothing to do - return false
if now is None:
return False
if self.last_date is None:
self.last_date = now
return False
result = False
if now.quarter != self.last_date.quarter:
result = True
self.last_date = now
return result
class RunYearly(Algo):
"""
Returns True on year change.
Returns True if the target.now's year has changed
since the last run, if not returns False. Useful for
yearly rebalancing strategies.
Note:
This algo will typically run on the first day of the
year (assuming we have daily data)
"""
def __init__(self):
super(RunYearly, self).__init__()
self.last_date = None
def __call__(self, target):
# get last date
now = target.now
# if none nothing to do - return false
if now is None:
return False
if self.last_date is None:
self.last_date = now
return False
result = False
if now.year != self.last_date.year:
result = True
self.last_date = now
return result
class RunOnDate(Algo):
"""
Returns True on a specific set of dates.
Args:
* dates (list): List of dates to run Algo on.
"""
def __init__(self, *dates):
"""
Args:
* dates (*args): A list of dates. Dates will be parsed
by pandas.to_datetime so pass anything that it can
parse. Typically, you will pass a string 'yyyy-mm-dd'.
"""
super(RunOnDate, self).__init__()
# parse dates and save
self.dates = [pd.to_datetime(d) for d in dates]
def __call__(self, target):
return target.now in self.dates
class RunAfterDate(Algo):
"""
Returns True after a date has passed
Args:
* date: Date after which to start trading
Note:
This is useful for algos that rely on trailing averages where you
don't want to start trading until some amount of data has been built up
"""
def __init__(self, date):
"""
Args:
* date: Date after which to start trading
"""
super(RunAfterDate, self).__init__()
# parse dates and save
self.date = pd.to_datetime(date)
def __call__(self, target):
return target.now > self.date
class RunAfterDays(Algo):
"""
Returns True after a specific number of 'warmup' trading days have passed
Args:
* days (int): Number of trading days to wait before starting
Note:
This is useful for algos that rely on trailing averages where you
don't want to start trading until some amount of data has been built up
"""
def __init__(self, days):
"""
Args:
* days (int): Number of trading days to wait before starting
"""
super(RunAfterDays, self).__init__()
self.days = days
def __call__(self, target):
if self.days > 0:
self.days -= 1
return False
return True
class RunEveryNPeriods(Algo):
"""
This algo runs every n periods.
Args:
* n (int): Run each n periods
* offset (int): Applies to the first run. If 0, this algo will run the
first time it is called.
This Algo can be useful for the following type of strategy:
Each month, select the top 5 performers. Hold them for 3 months.
You could then create 3 strategies with different offsets and create a
master strategy that would allocate equal amounts of capital to each.
"""
def __init__(self, n, offset=0):
self.n = n
self.offset = offset
self.idx = n - offset - 1
self.lcall = 0
def __call__(self, target):
# ignore multiple calls on same period
if self.lcall == target.now:
return False
else:
self.lcall = target.now
# run when idx == (n-1)
if self.idx == (self.n - 1):
self.idx = 0
return True
else:
self.idx += 1
return False
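# Illustrative sketch (assumed usage, following the docstring above): three
# offset copies of a 3-month rotation combined under one master strategy:
#
#     def sub(offset):
#         return bt.Strategy('sub%d' % offset,
#                            [RunEveryNPeriods(3, offset=offset), SelectAll(),
#                             SelectMomentum(5), WeighEqually(), Rebalance()])
#     master = bt.Strategy('master',
#                          [RunOnce(), WeighEqually(), Rebalance()],
#                          [sub(0), sub(1), sub(2)])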
class SelectAll(Algo):
"""
Sets temp['selected'] with all securities (based on universe).
Selects all the securities and saves them in temp['selected'].
By default, SelectAll does not include securities that have no
data (nan) on current date or those whose price is zero.
Args:
* include_no_data (bool): Include securities that do not have data?
Sets:
* selected
"""
def __init__(self, include_no_data=False):
super(SelectAll, self).__init__()
self.include_no_data = include_no_data
def __call__(self, target):
if self.include_no_data:
target.temp['selected'] = target.universe.columns
else:
universe = target.universe.ix[target.now].dropna()
target.temp['selected'] = list(universe[universe > 0].index)
return True
class SelectThese(Algo):
"""
Sets temp['selected'] with a set list of tickers.
Sets the temp['selected'] to a set list of tickers.
Args:
* tickers (list): List of tickers to select.
Sets:
* selected
"""
def __init__(self, tickers, include_no_data=False):
super(SelectThese, self).__init__()
self.tickers = tickers
self.include_no_data = include_no_data
def __call__(self, target):
if self.include_no_data:
target.temp['selected'] = self.tickers
else:
universe = target.universe[self.tickers].ix[target.now].dropna()
target.temp['selected'] = list(universe[universe > 0].index)
return True
class SelectHasData(Algo):
"""
Sets temp['selected'] based on all items in universe that meet
data requirements.
This is a more advanced version of SelectAll. Useful for selecting
tickers that need a certain amount of data for future algos to run
properly.
For example, if we need the items with 3 months of data or more,
we could use this Algo with a lookback period of 3 months.
When providing a lookback period, it is also wise to provide a min_count.
This is basically the number of data points needed within the lookback
period for a series to be considered valid. For example, in our 3 month
lookback above, we might want to specify the min_count as
57 -> a typical trading month has roughly 20 trading days, so three months is
about 60; factoring in some holidays, we can use 57 or 58. It's really up to you.
If you don't specify min_count, min_count will default to ffn's
get_num_days_required.
Args:
* lookback (DateOffset): A DateOffset that determines the lookback
period.
* min_count (int): Minimum number of days required for a series to be
considered valid. If not provided, ffn's get_num_days_required is
used to estimate the number of points required.
Sets:
* selected
"""
def __init__(self, lookback=pd.DateOffset(months=3),
min_count=None, include_no_data=False):
super(SelectHasData, self).__init__()
self.lookback = lookback
if min_count is None:
min_count = bt.ffn.get_num_days_required(lookback)
self.min_count = min_count
self.include_no_data = include_no_data
def __call__(self, target):
if 'selected' in target.temp:
selected = target.temp['selected']
else:
selected = target.universe.columns
filt = target.universe[selected].ix[target.now - self.lookback:]
cnt = filt.count()
cnt = cnt[cnt >= self.min_count]
if not self.include_no_data:
cnt = cnt[target.universe[selected].ix[target.now] > 0]
target.temp['selected'] = list(cnt.index)
return True
class SelectN(Algo):
"""
Sets temp['selected'] based on ranking temp['stat'].
Selects the top or bottom N items based on temp['stat'].
This is usually some kind of metric that will be computed in a
previous Algo and will be used for ranking purposes. Can select
top or bottom N based on sort_descending parameter.
Args:
* n (int): select top n items.
* sort_descending (bool): Should the stat be sorted in descending order
before selecting the first n items?
* all_or_none (bool): If true, only populates temp['selected'] if we
have n items. If we have less than n, then temp['selected'] = [].
Sets:
* selected
Requires:
* stat
"""
def __init__(self, n, sort_descending=True,
all_or_none=False):
super(SelectN, self).__init__()
if n < 0:
raise ValueError('n cannot be negative')
self.n = n
self.ascending = not sort_descending
self.all_or_none = all_or_none
def __call__(self, target):
stat = target.temp['stat'].dropna()
stat.sort(ascending=self.ascending)
# handle percent n
keep_n = self.n
if self.n < 1:
keep_n = int(self.n * len(stat))
sel = list(stat[:keep_n].index)
if self.all_or_none and len(sel) < keep_n:
sel = []
target.temp['selected'] = sel
return True
class SelectMomentum(AlgoStack):
"""
Sets temp['selected'] based on a simple momentum filter.
Selects the top n securities based on the total return over
a given lookback period. This is just a wrapper around an
AlgoStack with two algos: StatTotalReturn and SelectN.
Note that SelectAll() or similar should be called before
SelectMomentum(), as StatTotalReturn uses values of temp['selected']
Args:
* n (int): select first N elements
* lookback (DateOffset): lookback period for total return
calculation
* lag (DateOffset): Lag interval for total return calculation
* sort_descending (bool): Sort descending (highest return is best)
* all_or_none (bool): If true, only populates temp['selected'] if we
have n items. If we have less than n, then temp['selected'] = [].
Sets:
* selected
Requires:
* selected
"""
def __init__(self, n, lookback=pd.DateOffset(months=3),
lag=pd.DateOffset(days=0), sort_descending=True,
all_or_none=False):
super(SelectMomentum, self).__init__(
StatTotalReturn(lookback=lookback, lag=lag),
SelectN(n=n, sort_descending=sort_descending,
all_or_none=all_or_none))
class SelectWhere(Algo):
"""
Selects securities based on an indicator DataFrame.
Selects securities where the value is True on the current date
(target.now) only if current date is present in signal DataFrame.
For example, this could be the result of a pandas boolean comparison such
as data > 100.
Args:
* signal (DataFrame): Boolean DataFrame containing selection logic.
Sets:
* selected
"""
def __init__(self, signal, include_no_data=False):
self.signal = signal
self.include_no_data = include_no_data
def __call__(self, target):
# get signal Series at target.now
if target.now in self.signal.index:
sig = self.signal.ix[target.now]
#print(sig)
# get tickers where True
selected = sig.index[sig]
# save as list
if not self.include_no_data:
universe = target.universe[
list(selected)].ix[target.now].dropna()
selected = list(universe[universe > 0].index)
target.temp['selected'] = list(selected)
return True
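# Illustrative sketch ('data' is a hypothetical price DataFrame): build the
# boolean signal from a moving-average comparison and select securities
# trading above their 50-period average:
#
#     sma = pd.rolling_mean(data, 50)   # data.rolling(50).mean() on newer pandas
#     s = bt.Strategy('above_sma',
#                     [SelectWhere(data > sma), WeighEqually(), Rebalance()])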
class SelectRandomly(AlgoStack):
"""
Sets temp['selected'] based on a random subset of
the items currently in temp['selected'].
Selects n random elements from the list stored in temp['selected'].
This is useful for benchmarking against a strategy where we believe
the selection algorithm is adding value.
For example, if we are testing a momentum strategy and we want to see if
selecting securities based on momentum is better than just selecting
securities randomly, we could use this Algo to create a random Strategy
used for random benchmarking.
Note:
Another selection algorithm should be used prior to this Algo to
populate temp['selected']. This will typically be SelectAll.
Args:
* n (int): Select N elements randomly.
Sets:
* selected
Requires:
* selected
"""
def __init__(self, n=None, include_no_data=False):
super(SelectRandomly, self).__init__()
self.n = n
self.include_no_data = include_no_data
def __call__(self, target):
if 'selected' in target.temp:
sel = target.temp['selected']
else:
sel = target.universe.columns
if not self.include_no_data:
universe = target.universe[list(sel)].ix[target.now].dropna()
sel = list(universe[universe > 0].index)
if self.n is not None:
n = self.n if self.n < len(sel) else len(sel)
sel = random.sample(sel, n)
target.temp['selected'] = sel
return True
class StatTotalReturn(Algo):
"""
Sets temp['stat'] with total returns over a given period.
Sets the 'stat' based on the total return of each element in
temp['selected'] over a given lookback period. The total return
is determined by ffn's calc_total_return.
Args:
* lookback (DateOffset): lookback period.
* lag (DateOffset): Lag interval. Total return is calculated in
the interval [now - lookback - lag, now - lag]
Sets:
* stat
Requires:
* selected
"""
def __init__(self, lookback=pd.DateOffset(months=3),
lag=pd.DateOffset(days=0)):
super(StatTotalReturn, self).__init__()
self.lookback = lookback
self.lag = lag
def __call__(self, target):
selected = target.temp['selected']
t0 = target.now - self.lag
prc = target.universe[selected].ix[t0 - self.lookback:t0]
target.temp['stat'] = prc.calc_total_return()
return True
class WeighEqually(Algo):
"""
Sets temp['weights'] by calculating equal weights for all items in
selected.
Equal weight Algo. Sets the 'weights' to 1/n for each item in 'selected'.
Sets:
* weights
Requires:
* selected
"""
def __init__(self):
super(WeighEqually, self).__init__()
def __call__(self, target):
selected = target.temp['selected']
n = len(selected)
if n == 0:
target.temp['weights'] = {}
else:
w = 1.0 / n
target.temp['weights'] = {x: w for x in selected}
return True
class WeighSpecified(Algo):
"""
Sets temp['weights'] based on a provided dict of ticker:weights.
Sets the weights based on pre-specified targets.
Args:
* weights (dict): target weights -> ticker: weight
Sets:
* weights
"""
def __init__(self, **weights):
super(WeighSpecified, self).__init__()
self.weights = weights
def __call__(self, target):
# added copy to make sure these are not overwritten
target.temp['weights'] = self.weights.copy()
return True
class WeighTarget(Algo):
"""
Sets target weights based on a target weight DataFrame.
If the target weight DataFrame is of the same dimension
as the target.universe, the portfolio will effectively be rebalanced on each
period. For example, if we have daily data and the target DataFrame is of
the same shape, we will have daily rebalancing.
However, if we provide a target weight DataFrame that has only month end
dates, then rebalancing only occurs monthly.
Basically, if a weight is provided on a given date, the target weights are
set and the algo moves on (presumably to a Rebalance algo). If not, no
target weights are set.
Args:
* weights (DataFrame): DataFrame containing the target weights
Sets:
* weights
"""
def __init__(self, weights):
self.weights = weights
def __call__(self, target):
# get current target weights
if target.now in self.weights.index:
w = self.weights.ix[target.now]
# dropna and save
target.temp['weights'] = w.dropna()
return True
else:
return False
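# Illustrative sketch (hypothetical tickers and weights): a fixed 60/40
# portfolio rebalanced only on the month-end dates present in the weights
# DataFrame:
#
#     tw = data.resample('M', how='last') * 0.0   # old-pandas resample syntax
#     tw['SPY'] = 0.6
#     tw['AGG'] = 0.4
#     s = bt.Strategy('60_40', [WeighTarget(tw), Rebalance()])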
class WeighInvVol(Algo):
"""
Sets temp['weights'] based on the inverse volatility Algo.
Sets the target weights based on ffn's calc_inv_vol_weights. This
is a commonly used technique for risk parity portfolios. The least
volatile elements receive the highest weight under this scheme. Weights
are proportional to the inverse of their volatility.
Args:
* lookback (DateOffset): lookback period for estimating volatility
Sets:
* weights
Requires:
* selected
"""
def __init__(self, lookback=pd.DateOffset(months=3)):
super(WeighInvVol, self).__init__()
self.lookback = lookback
def __call__(self, target):
selected = target.temp['selected']
if len(selected) == 0:
target.temp['weights'] = {}
return True
if len(selected) == 1:
target.temp['weights'] = {selected[0]: 1.}
return True
prc = target.universe[selected].ix[target.now - self.lookback:]
tw = bt.ffn.calc_inv_vol_weights(
prc.to_returns().dropna())
target.temp['weights'] = tw.dropna()
return True
class WeighMeanVar(Algo):
"""
Sets temp['weights'] based on mean-variance optimization.
Sets the target weights based on ffn's calc_mean_var_weights. This is a
Python implementation of Markowitz's mean-variance optimization.
See:
http://en.wikipedia.org/wiki/Modern_portfolio_theory#The_efficient_frontier_with_no_risk-free_asset
Args:
* lookback (DateOffset): lookback period for estimating volatility
* bounds ((min, max)): tuple specifying the min and max weights for
each asset in the optimization.
* covar_method (str): method used to estimate the covariance. See ffn's
calc_mean_var_weights for more details.
* rf (float): risk-free rate used in optimization.
Sets:
* weights
Requires:
* selected
"""
def __init__(self, lookback=pd.DateOffset(months=3),
bounds=(0., 1.), covar_method='ledoit-wolf',
rf=0.):
super(WeighMeanVar, self).__init__()
self.lookback = lookback
self.bounds = bounds
self.covar_method = covar_method
self.rf = rf
def __call__(self, target):
selected = target.temp['selected']
if len(selected) == 0:
target.temp['weights'] = {}
return True
if len(selected) == 1:
target.temp['weights'] = {selected[0]: 1.}
return True
prc = target.universe[selected].ix[target.now - self.lookback:]
tw = bt.ffn.calc_mean_var_weights(
prc.to_returns().dropna(), weight_bounds=self.bounds,
covar_method=self.covar_method, rf=self.rf)
target.temp['weights'] = tw.dropna()
return True
class WeighRandomly(Algo):
"""
Sets temp['weights'] based on a random weight vector.
Sets random target weights for each security in 'selected'.
This is useful for benchmarking against a strategy where we believe
the weighing algorithm is adding value.
For example, if we are testing a low-vol strategy and we want to see if
our weighing strategy is better than just weighing
securities randomly, we could use this Algo to create a random Strategy
used for random benchmarking.
This is an Algo wrapper around ffn's random_weights function.
Args:
* bounds ((low, high)): Tuple including low and high bounds for each
security
* weight_sum (float): What should the weights sum up to?
Sets:
* weights
Requires:
* selected
"""
def __init__(self, bounds=(0., 1.), weight_sum=1):
super(WeighRandomly, self).__init__()
self.bounds = bounds
self.weight_sum = weight_sum
def __call__(self, target):
sel = target.temp['selected']
n = len(sel)
w = {}
try:
rw = bt.ffn.random_weights(
n, self.bounds, self.weight_sum)
w = dict(zip(sel, rw))
except ValueError:
pass
target.temp['weights'] = w
return True
class LimitDeltas(Algo):
"""
Modifies temp['weights'] based on weight delta limits.
Basically, this can be used if we want to restrict how much a security's
target weight can change from day to day. Useful when we want to be more
conservative about how much we could actually trade on a given day without
affecting the market.
For example, if we have a strategy that is currently long 100% one
security, and the weighing Algo sets the new weight to 0%, but we
use this Algo with a limit of 0.1, the new target weight will
be 90% instead of 0%.
Args:
* limit (float, dict): Weight delta limit. If float, this will be a
global limit for all securities. If dict, you may specify by-ticker
limit.
Sets:
* weights
Requires:
* weights
"""
def __init__(self, limit=0.1):
super(LimitDeltas, self).__init__()
self.limit = limit
# determine if global or specific
self.global_limit = True
if isinstance(limit, dict):
self.global_limit = False
def __call__(self, target):
tw = target.temp['weights']
all_keys = set(target.children.keys() + tw.keys())
for k in all_keys:
tgt = tw[k] if k in tw else 0.
cur = target.children[k].weight if k in target.children else 0.
delta = tgt - cur
# check if we need to limit
if self.global_limit:
if abs(delta) > self.limit:
tw[k] = cur + (self.limit * np.sign(delta))
else:
# make sure we have a limit defined in case of limit dict
if k in self.limit:
lmt = self.limit[k]
if abs(delta) > lmt:
tw[k] = cur + (lmt * np.sign(delta))
return True
class LimitWeights(Algo):
"""
Modifies temp['weights'] based on weight limits.
This is an Algo wrapper around ffn's limit_weights. The purpose of this
Algo is to limit the weight of any one specific asset. For example, some
Algos will set some rather extreme weights that may not be acceptable.
Therefore, we can use this Algo to limit the extreme weights. The excess
weight is then redistributed to the other assets, proportionally to
their current weights.
See ffn's limit_weights for more information.
Args:
* limit (float): Weight limit.
Sets:
* weights
Requires:
* weights
"""
def __init__(self, limit=0.1):
super(LimitWeights, self).__init__()
self.limit = limit
def __call__(self, target):
if 'weights' not in target.temp:
return True
tw = target.temp['weights']
if len(tw) == 0:
return True
tw = bt.ffn.limit_weights(tw, self.limit)
target.temp['weights'] = tw
return True
class CapitalFlow(Algo):
"""
Used to model capital flows. Flows can either be inflows or outflows.
This Algo can be used to model capital flows. For example, a pension
fund might have inflows every month or year due to contributions. This
Algo will affect the capital of the target node without affecting returns
for the node.
Since this is modeled as an adjustment, the capital will remain in the
strategy until a re-allocation/rebalancement is made.
Args:
* amount (float): Amount of adjustment
"""
def __init__(self, amount):
"""
CapitalFlow constructor.
Args:
* amount (float): Amount to adjust by
"""
super(CapitalFlow, self).__init__()
self.amount = float(amount)
def __call__(self, target):
target.adjust(self.amount)
return True
class CloseDead(Algo):
"""
Closes all positions for which prices are equal to zero (we assume
that these stocks are dead) and removes them from temp['weights'] if
they happen to appear there.
To be called before Rebalance().
In a normal workflow it is not needed, as those securities will not
be selected by SelectAll(include_no_data=False) or similar method, and
Rebalance() closes positions that are not in temp['weights'] anyway.
However, in cases where include_no_data=False cannot be used for some
reason, or a modified weighting method is used, CloseDead() helps
avoid errors.
Requires:
* weights
"""
def __init__(self):
super(CloseDead, self).__init__()
def __call__(self, target):
if 'weights' not in target.temp:
return True
targets = target.temp['weights']
for c in target.children:
if target.universe[c].ix[target.now] <= 0:
target.close(c)
if c in targets:
del targets[c]
return True
class Rebalance(Algo):
"""
Rebalances capital based on temp['weights']
Rebalances capital based on temp['weights']. Also closes
positions if open but not in target_weights. This is typically
the last Algo called once the target weights have been set.
Requires:
* weights
"""
def __init__(self):
super(Rebalance, self).__init__()
def __call__(self, target):
if 'weights' not in target.temp:
return True
targets = target.temp['weights']
# de-allocate children that are not in targets
not_in = [x for x in target.children if x not in targets]
for c in not_in:
target.close(c)
# save value because it will change after each call to allocate
# use it as base in rebalance calls
base = target.value
for item in targets.iteritems():
target.rebalance(item[1], child=item[0], base=base)
return True
class RebalanceOverTime(Algo):
"""
Similar to Rebalance but rebalances to target
weight over n periods.
Rebalances towards a target weight over a n periods. Splits up the weight
delta over n periods.
This can be useful if we want to make more conservative rebalancing
assumptions. Some strategies can produce large swings in allocations. It
might not be reasonable to assume that this rebalancing can occur at the
end of one specific period. Therefore, this algo can be used to simulate
rebalancing over n periods.
This has typically been used in monthly strategies where we want to spread
out the rebalancing over 5 or 10 days.
Note:
This Algo will require the run_always wrapper in the above case. For
example, the RunMonthly will return True on the first day, and
RebalanceOverTime will be 'armed'. However, RunMonthly will return
False for the rest of the days in the month. Therefore, we must specify that we
want to always run this algo.
Args:
* n (int): number of periods over which rebalancing takes place.
Requires:
* weights
"""
def __init__(self, n=10):
super(RebalanceOverTime, self).__init__()
self.n = float(n)
self._rb = Rebalance()
self._weights = None
self._days_left = None
def __call__(self, target):
# new weights specified - update rebalance data
if 'weights' in target.temp:
self._weights = target.temp['weights']
self._days_left = self.n
# if _weights are not None, we have some work to do
if self._weights:
tgt = {}
# scale delta relative to # of periods left and set that as the new
# target
for t in self._weights:
curr = target.children[t].weight if t in \
target.children else 0.
dlt = (self._weights[t] - curr) / self._days_left
tgt[t] = curr + dlt
# mock weights and call real Rebalance
target.temp['weights'] = tgt
self._rb(target)
# dec _days_left. If 0, set to None & set _weights to None
self._days_left -= 1
if self._days_left == 0:
self._days_left = None
self._weights = None
return True
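# Illustrative sketch (assumed usage): spread a monthly rebalance over 10
# periods. The run_always wrapper keeps this algo running on days when
# RunMonthly returns False, as described in the Note above:
#
#     s = bt.Strategy('gradual',
#                     [RunMonthly(), SelectAll(), WeighEqually(),
#                      run_always(RebalanceOverTime(n=10))])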
class Require(Algo):
"""
Flow control Algo.
This algo returns the value of a predicate
on a temp entry. Useful for controlling
flow.
For example, we might want to make sure we have some items selected.
We could pass a lambda function that checks the len of 'selected':
pred=lambda x: len(x) > 0
item='selected'
Args:
* pred (Algo): Function that returns a Bool given the strategy. This
is the definition of an Algo. However, this is typically used
with a simple lambda function.
* item (str): An item within temp.
* if_none (bool): Result if the required item is not in temp or if its
value is None
"""
def __init__(self, pred, item, if_none=False):
super(Require, self).__init__()
self.item = item
self.pred = pred
self.if_none = if_none
def __call__(self, target):
if self.item not in target.temp:
return self.if_none
item = target.temp[self.item]
if item is None:
return self.if_none
return self.pred(item)
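# Illustrative sketch (assumed usage): abort the rest of the stack whenever
# the selection step produced an empty list:
#
#     Require(pred=lambda x: len(x) > 0, item='selected')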
| apache-2.0 |
biocore/qiita | qiita_db/processing_job.py | 2 | 94348 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import networkx as nx
import qiita_db as qdb
import pandas as pd
from collections import defaultdict
from collections.abc import Iterable
from datetime import datetime
from itertools import chain
from json import dumps, loads
from multiprocessing import Process, Queue, Event
from re import search, findall
from subprocess import Popen, PIPE
from time import sleep, time
from uuid import UUID
from os.path import join
from threading import Thread
from humanize import naturalsize
from qiita_core.exceptions import IncompetentQiitaDeveloperError
from qiita_core.qiita_settings import qiita_config
from qiita_db.util import create_nested_path
class Watcher(Process):
# TODO: Qiita will need a proper mapping of these states to Qiita states
# Currently, these strings are being inserted directly into Qiita's status
# table. Qiita will be unfamiliar with many of these. We will need at least
# one additional job type for 'Held': A job waiting for another to complete
# before it can run.
#
# Note that the main Qiita script instantiates an object of this class in
# a separate thread, so it can periodically update the database w/metadata
# from Watcher's queue. Qiita's script also calls qdb.complete() so there
# are no circular references. TODO: replace w/a REST call.
# valid Qiita states:
# The current status of the job, one of {'queued', 'running',
# 'success', 'error', 'in_construction', 'waiting'}
# TODO: what to map in_construction to?
job_state_map = {'C': 'completed', 'E': 'exiting', 'H': 'held',
'Q': 'queued', 'R': 'running', 'T': 'moving',
'W': 'waiting', 'S': 'suspended'}
# TODO: moving, waiting, and suspended have been mapped to
# 'running' in Qiita, as 'waiting' in Qiita connotes that the
# main job itself has completed, and is waiting on validator
# jobs to finish, etc. Revisit
torque_to_qiita_state_map = {'completed': 'completed',
'held': 'queued',
'queued': 'queued',
'exiting': 'running',
'running': 'running',
'moving': 'running',
'waiting': 'running',
'suspended': 'running',
'DROPPED': 'error'}
def __init__(self):
super(Watcher, self).__init__()
# set self.owner to qiita, or whoever owns the processes we need to watch.
self.owner = qiita_config.trq_owner
# Torque is set to drop jobs from its queue 60 seconds after
# completion, by default. Setting a polling value less than
# that allows for multiple chances to catch the exit status
# before it disappears.
self.polling_value = qiita_config.trq_poll_val
# the cross-process method by which to communicate across
# process boundaries. Note that when Watcher object runs,
# another process will get created, and receive a copy of
# the Watcher object. At this point, these self.* variables
# become local to each process. Hence, the main process
# can't see self.processes for example; theirs will just
# be empty.
self.queue = Queue()
self.processes = {}
# the cross-process sentinel value to shutdown Watcher
self.event = Event()
def _element_extract(self, snippet, list_of_elements,
list_of_optional_elements):
results = {}
missing_elements = []
for element in list_of_elements:
value = search('<%s>(.*?)</%s>' % (element, element), snippet)
if value:
results[element] = value.group(1)
else:
missing_elements.append(element)
if missing_elements:
raise AssertionError("The following elements were not found: %s"
% ', '.join(missing_elements))
for element in list_of_optional_elements:
value = search('<%s>(.*?)</%s>' % (element, element), snippet)
if value:
results[element] = value.group(1)
return results
def _process_dependent_jobs(self, results):
# when a job has its status changed, check to see if the job completed
# with an error. If so, check to see if it had any jobs that were being
# 'held' on this job's successful completion. If we are maintaining
# state on any of these jobs, mark them as 'DROPPED', because they will
# no longer appear in qstat output.
if results['job_state'] == 'completed':
if results['exit_status'] == '0':
return
if 'depend' in results:
tmp = results['depend'].split(':')
if tmp[0] == 'beforeok':
tmp.pop(0)
for child_job_id in tmp:
# jobs in 'beforeok' are labeled with the complete
# job id and what looks to be the server name doing
# the work. For now, simply remove the
# '@host.domain.org' (server) component.
child_job_id = child_job_id.split('@')[0]
self.processes[child_job_id]['job_state'] = 'DROPPED'
self.queue.put(self.processes[child_job_id])
def run(self):
# check to see if qstat is available. If not, exit immediately.
proc = Popen("qstat -x", shell=True, stdout=PIPE, stderr=PIPE)
proc.wait()
if proc.returncode != 0:
# inform any process expecting data from Watcher
self.queue.put('QUIT')
self.event.set()
while not self.event.is_set():
proc = Popen("qstat -x", shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
if proc.returncode == 0:
# qstat returned successfully with metadata on processes
# break up metadata into individual <Job></Job> elements
# for processing.
m = findall('<Job>(.*?)</Job>', stdout.decode('ascii'))
for item in m:
# filter out jobs that don't belong to owner
if search('<Job_Owner>%s</Job_Owner>' % self.owner, item):
# extract the metadata we want.
# if a job has completed, an exit_status element will
# be present. We also want that.
results = self._element_extract(item, ['Job_Id',
'Job_Name',
'job_state'],
['depend'])
tmp = Watcher.job_state_map[results['job_state']]
results['job_state'] = tmp
if results['job_state'] == 'completed':
results2 = self._element_extract(item,
['exit_status'],
[])
results['exit_status'] = results2['exit_status']
# determine if anything has changed since last poll
if results['Job_Id'] in self.processes:
if self.processes[results['Job_Id']] != results:
# metadata for existing job has changed
self.processes[results['Job_Id']] = results
self.queue.put(results)
self._process_dependent_jobs(results)
else:
# metadata for new job inserted
self.processes[results['Job_Id']] = results
self.queue.put(results)
else:
self.queue.put('QUIT')
self.event.set()
# don't join(), since we are exiting from the main loop
sleep(self.polling_value)
def stop(self):
# 'poison pill' to thread/process
self.queue.put('QUIT')
# setting self.event is a safe way of communicating a boolean
# value across processes and threads.
# when this event is 'set' by the main line of execution in Qiita,
# (or in any other process if need be), Watcher's run loop will
# stop and the Watcher process will exit.
self.event.set()
# Here, it is assumed that we are running this from the main
# context. By joining(), we're waiting for the Watcher process to
# end before returning from this method.
self.join()
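# Illustrative sketch (hypothetical consumer, not part of this module): the
# process holding the database connection would typically drain the queue and
# translate scheduler states into Qiita states:
#
#     watcher = Watcher()
#     watcher.start()
#     for item in iter(watcher.queue.get, 'QUIT'):
#         status = Watcher.torque_to_qiita_state_map[item['job_state']]
#         # ... e.g. look up ProcessingJob.by_external_id(item['Job_Id'])
#         # and update its status/external metadata accordingly ...
#     watcher.stop()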
def launch_local(env_script, start_script, url, job_id, job_dir):
# launch_local() differs from launch_torque(), as no Watcher() is used.
# each launch_local() process will execute the cmd as a child process,
# wait, and update the database once cmd has completed.
#
# As processes are lighter weight than jobs, this should be fine.
# This is how the current job model works locally.
cmd = [start_script, url, job_id, job_dir]
# When Popen() executes, the shell is not in interactive mode,
# so it is not sourcing any of the bash configuration files
# We need to source it so the env_script are available
cmd = "bash -c '%s; %s'" % (env_script, ' '.join(cmd))
# Popen() may also need universal_newlines=True
proc = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
# Communicate pulls all stdout/stderr from the PIPEs
# This call waits until cmd is done
stdout, stderr = proc.communicate()
# proc.returncode will be equal to None if the process hasn't finished
# yet. If cmd was terminated by a SIGNAL, it will be a negative value.
# (*nix platforms only)
error = None
if proc.returncode != 0:
error = "error from launch_local when launching cmd='%s'" % cmd
error = "%s\n%s\n%s" % (error, stdout, stderr)
# Forcing the creation of a new connection
qdb.sql_connection.create_new_transaction()
ProcessingJob(job_id).complete(False, error=error)
def launch_torque(env_script, start_script, url, job_id, job_dir,
dependent_job_id, resource_params):
# note that job_id is Qiita's UUID, not a Torque job ID
cmd = [start_script, url, job_id, job_dir]
# generating file contents to be used with qsub
lines = []
# TODO: is PBS_JOBID being set correctly?
lines.append("echo $PBS_JOBID")
# TODO: revisit below
lines.append("source ~/.bash_profile")
lines.append(env_script)
lines.append(' '.join(cmd))
# writing the file to be used with qsub
create_nested_path(job_dir)
fp = join(job_dir, '%s.txt' % job_id)
with open(fp, 'w') as torque_job_file:
torque_job_file.write("\n".join(lines))
qsub_cmd = ['qsub']
if dependent_job_id:
# note that a dependent job should be submitted before the
# 'parent' job ends, most likely. Torque doesn't keep job state
# around forever, and creating a dependency on a job already
# completed has not been tested.
qsub_cmd.append("-W")
qsub_cmd.append("depend=afterok:%s" % dependent_job_id)
qsub_cmd.append(resource_params)
qsub_cmd.append(fp)
qsub_cmd.append("-o")
qsub_cmd.append("%s/qsub-output.txt" % job_dir)
qsub_cmd.append("-e")
qsub_cmd.append("%s/qsub-error.txt" % job_dir)
# TODO: revisit epilogue
qsub_cmd.append("-l")
qsub_cmd.append("epilogue=/home/qiita/qiita-epilogue.sh")
# Popen() may also need universal_newlines=True
# may also need stdout = stdout.decode("utf-8").rstrip()
qsub_cmd = ' '.join(qsub_cmd)
# Qopen is a wrapper for Popen() that allows us to wait on a qsub
# call, but return if the qsub command is not returning after a
# prolonged period of time.
q = Qopen(qsub_cmd)
q.start()
# wait for qsub_cmd to finish, but not longer than the number of
# seconds specified below.
init_time = time()
q.join(5)
total_time = time() - init_time
# for internal use, logging if the time is larger than 2 seconds
if total_time > 2:
qdb.logger.LogEntry.create('Runtime', 'qsub return time', info={
'time_in_seconds': str(total_time)})
# if q.returncode is None, it's because qsub did not return.
if q.returncode is None:
e = "Error Torque configuration information incorrect: %s" % qsub_cmd
raise IncompetentQiitaDeveloperError(e)
# a q.returncode of 0 in this case means qsub successfully pushed the job
# onto Torque's queue.
if q.returncode != 0:
raise AssertionError("Error Torque could not launch %s (%d)" %
(qsub_cmd, q.returncode))
torque_job_id = q.stdout.decode('ascii').strip('\n')
return torque_job_id
class Qopen(Thread):
def __init__(self, cmd):
super(Qopen, self).__init__()
self.cmd = cmd
self.stdout = None
self.stderr = None
self.returncode = None
def run(self):
proc = Popen(self.cmd, shell=True, stdout=PIPE, stderr=PIPE)
self.stdout, self.stderr = proc.communicate()
self.returncode = proc.returncode
def _system_call(cmd):
"""Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
Returns
-------
tuple of (str, str, int)
The standard output, standard error and exist status of the
executed command
Notes
-----
This function is ported from QIIME (http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license.
"""
proc = Popen(cmd, universal_newlines=True, shell=True, stdout=PIPE,
stderr=PIPE)
# Communicate pulls all stdout/stderr from the PIPEs
# This call blocks until the command is done
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value
class ProcessingJob(qdb.base.QiitaObject):
r"""Models a job that executes a command in a set of artifacts
Attributes
----------
user
command
parameters
status
log
heartbeat
step
Methods
-------
exists
create
"""
_table = 'processing_job'
_launch_map = {'qiita-plugin-launcher':
{'function': launch_local,
'execute_in_process': False},
'qiita-plugin-launcher-qsub':
{'function': launch_torque,
'execute_in_process': True}}
@classmethod
def exists(cls, job_id):
"""Check if the job `job_id` exists
Parameters
----------
job_id : str
The job id
Returns
-------
bool
True if the job `job_id` exists. False otherwise.
"""
try:
UUID(job_id)
except ValueError:
return False
with qdb.sql_connection.TRN:
sql = """SELECT EXISTS(SELECT *
FROM qiita.processing_job
WHERE processing_job_id = %s)"""
qdb.sql_connection.TRN.add(sql, [job_id])
return qdb.sql_connection.TRN.execute_fetchlast()
@classmethod
def by_external_id(cls, external_id):
"""Return Qiita Job UUID associated with external_id
Parameters
----------
external_id : str
An external id (e.g. Torque Job ID)
Returns
-------
str
Qiita Job UUID, if found, otherwise None
"""
with qdb.sql_connection.TRN:
sql = """SELECT processing_job_id FROM qiita.processing_job
WHERE external_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [external_id])
return cls(qdb.sql_connection.TRN.execute_fetchlast())
def get_resource_allocation_info(self):
"""Return resource allocation defined for this job. For
external computational resources only.
Returns
-------
str
A resource allocation string useful to the external resource
"""
with qdb.sql_connection.TRN:
if self.command.name == 'complete_job':
jtype = 'COMPLETE_JOBS_RESOURCE_PARAM'
v = loads(self.parameters.values['payload'])
# assume an empty string for name is preferable to None
name = ''
if v['artifacts'] is not None:
an_element = list(v['artifacts'].keys())[0]
name = v['artifacts'][an_element]['artifact_type']
elif self.command.name == 'release_validators':
jtype = 'RELEASE_VALIDATORS_RESOURCE_PARAM'
tmp = ProcessingJob(self.parameters.values['job'])
name = tmp.parameters.command.name
elif self.command.name == 'Validate':
jtype = 'VALIDATOR'
name = self.parameters.values['artifact_type']
elif self.id == 'register':
jtype = 'REGISTER'
name = 'REGISTER'
else:
# assume anything else is a command
jtype = 'RESOURCE_PARAMS_COMMAND'
name = self.command.name
# first, query for resources matching name and type
sql = """SELECT allocation FROM
qiita.processing_job_resource_allocation
WHERE name = %s and job_type = %s"""
qdb.sql_connection.TRN.add(sql, [name, jtype])
result = qdb.sql_connection.TRN.execute_fetchflatten()
# if no matches for both type and name were found, query the
# 'default' value for the type
if not result:
sql = """SELECT allocation FROM
qiita.processing_job_resource_allocation WHERE
name = %s and job_type = %s"""
qdb.sql_connection.TRN.add(sql, ['default', jtype])
result = qdb.sql_connection.TRN.execute_fetchflatten()
if not result:
raise AssertionError(
"Could not match %s to a resource allocation!" % name)
allocation = result[0]
if ('{samples}' in allocation or '{columns}' in allocation or
'{input_size}' in allocation):
samples, columns, input_size = self.shape
parts = []
for part in allocation.split(' '):
if ('{samples}' in part or '{columns}' in part or
'{input_size}' in part):
variable, value = part.split('=')
error_msg = ('Obvious incorrect allocation. Please '
'contact [email protected]')
# to make sure that the formula is correct and avoid
# possible issues with conversions, we will check that
# all the variables {samples}/{columns}/{input_size}
# present in the formula are not None, if any is None
# we will set the job's error (will stop it) and the
# message is gonna be shown to the user within the job
if (('{samples}' in value and samples is None) or
('{columns}' in value and columns is None) or
('{input_size}' in value and input_size is
None)):
self._set_error(error_msg)
return 'Not valid'
try:
# if eval has something that can't be processed
# it will raise a NameError
mem = eval(value.format(
samples=samples, columns=columns,
input_size=input_size))
except NameError:
self._set_error(error_msg)
return 'Not valid'
else:
if mem <= 0:
self._set_error(error_msg)
return 'Not valid'
value = naturalsize(mem, gnu=True, format='%.0f')
part = '%s=%s' % (variable, value)
parts.append(part)
allocation = ' '.join(parts)
return allocation
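# Illustrative example (hypothetical allocation string): with an allocation of
# '-q qiita -l mem={samples}*1000000' and samples=2000, the formula evaluates
# to 2000000000, which naturalsize(..., gnu=True, format='%.0f') renders as
# '2G', yielding '-q qiita -l mem=2G'.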
@classmethod
def create(cls, user, parameters, force=False):
"""Creates a new job in the system
Parameters
----------
user : qiita_db.user.User
The user executing the job
parameters : qiita_db.software.Parameters
The parameters of the job being executed
force : bool
Force creation on duplicated parameters
Returns
-------
qiita_db.processing_job.ProcessingJob
The newly created job
Notes
-----
If force is True the job is going to be created even if another job
exists with the same parameters
"""
TTRN = qdb.sql_connection.TRN
with TTRN:
command = parameters.command
# check if a job with the same parameters already exists
sql = """SELECT processing_job_id, email, processing_job_status,
COUNT(aopj.artifact_id)
FROM qiita.processing_job
LEFT JOIN qiita.processing_job_status
USING (processing_job_status_id)
LEFT JOIN qiita.artifact_output_processing_job aopj
USING (processing_job_id)
WHERE command_id = %s AND processing_job_status IN (
'success', 'waiting', 'running', 'in_construction') {0}
GROUP BY processing_job_id, email,
processing_job_status"""
# we need to use ILIKE because of booleans as they can be
# false or False
params = []
for k, v in parameters.values.items():
# this is necessary in case we have an Iterable as a value
# but that is a string (strings are also Iterable)
if isinstance(v, Iterable) and not isinstance(v, str):
for vv in v:
params.extend([k, str(vv)])
else:
params.extend([k, str(v)])
if params:
# divided by 2 as we have key-value pairs
len_params = int(len(params)/2)
sql = sql.format(' AND ' + ' AND '.join(
["command_parameters->>%s ILIKE %s"] * len_params))
params = [command.id] + params
TTRN.add(sql, params)
else:
# the sql variable expects the list of parameters but if there
# is no param we need to replace the {0} with an empty string
TTRN.add(sql.format(""), [command.id])
# checking that if the job status is success, it has children
# [2] status, [3] children count
existing_jobs = [r for r in TTRN.execute_fetchindex()
if r[2] != 'success' or r[3] > 0]
if existing_jobs and not force:
raise ValueError(
'Cannot create job because the parameters are the same as '
'jobs that are queued, running or already have '
'succeeded:\n%s' % '\n'.join(
["%s: %s" % (jid, status)
for jid, _, status, _ in existing_jobs]))
sql = """INSERT INTO qiita.processing_job
(email, command_id, command_parameters,
processing_job_status_id)
VALUES (%s, %s, %s, %s)
RETURNING processing_job_id"""
status = qdb.util.convert_to_id(
"in_construction", "processing_job_status")
sql_args = [user.id, command.id,
parameters.dump(), status]
TTRN.add(sql, sql_args)
job_id = TTRN.execute_fetchlast()
# Link the job with the input artifacts
sql = """INSERT INTO qiita.artifact_processing_job
(artifact_id, processing_job_id)
VALUES (%s, %s)"""
pending = defaultdict(dict)
for pname, vals in command.parameters.items():
if vals[0] == 'artifact':
artifact_info = parameters.values[pname]
# If the artifact_info is a list, then the artifact
# still doesn't exist because the current job is part
# of a workflow, so we can't link
if not isinstance(artifact_info, list):
TTRN.add(sql, [artifact_info, job_id])
else:
pending[artifact_info[0]][pname] = artifact_info[1]
elif pname == 'artifact':
TTRN.add(sql, [parameters.values[pname], job_id])
if pending:
sql = """UPDATE qiita.processing_job
SET pending = %s
WHERE processing_job_id = %s"""
TTRN.add(sql, [dumps(pending), job_id])
TTRN.execute()
return cls(job_id)
@property
def user(self):
"""The user that launched the job
Returns
-------
qiita_db.user.User
The user that launched the job
"""
with qdb.sql_connection.TRN:
sql = """SELECT email
FROM qiita.processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
email = qdb.sql_connection.TRN.execute_fetchlast()
return qdb.user.User(email)
@property
def command(self):
"""The command that the job executes
Returns
-------
qiita_db.software.Command
The command that the job executes
"""
with qdb.sql_connection.TRN:
sql = """SELECT command_id
FROM qiita.processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
cmd_id = qdb.sql_connection.TRN.execute_fetchlast()
return qdb.software.Command(cmd_id)
@property
def parameters(self):
"""The parameters used in the job's command
Returns
-------
qiita_db.software.Parameters
The parameters used in the job's command
"""
with qdb.sql_connection.TRN:
sql = """SELECT command_id, command_parameters
FROM qiita.processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
res = qdb.sql_connection.TRN.execute_fetchindex()[0]
return qdb.software.Parameters.load(
qdb.software.Command(res[0]), values_dict=res[1])
@property
def input_artifacts(self):
"""The artifacts used as input in the job
Returns
-------
list of qiita_db.artifact.Artifact
The artifacts used as input in the job
"""
with qdb.sql_connection.TRN:
sql = """SELECT artifact_id
FROM qiita.artifact_processing_job
WHERE processing_job_id = %s
ORDER BY artifact_id"""
qdb.sql_connection.TRN.add(sql, [self.id])
return [qdb.artifact.Artifact(aid)
for aid in qdb.sql_connection.TRN.execute_fetchflatten()]
@property
def status(self):
"""The status of the job
Returns
-------
str
The current status of the job, one of {'queued', 'running',
'success', 'error', 'in_construction', 'waiting'}
"""
with qdb.sql_connection.TRN:
sql = """SELECT processing_job_status
FROM qiita.processing_job_status
JOIN qiita.processing_job
USING (processing_job_status_id)
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
def _set_status(self, value):
"""Sets the status of the job
Parameters
----------
value : str, {'queued', 'running', 'success', 'error',
'in_construction', 'waiting'}
The new status of the job
Raises
------
qiita_db.exceptions.QiitaDBStatusError
- If the current status of the job is 'success'
- If the current status of the job is 'running' and `value` is
'queued'
"""
with qdb.sql_connection.TRN:
current_status = self.status
if current_status == 'success':
raise qdb.exceptions.QiitaDBStatusError(
"Cannot change the status of a 'success' job")
elif current_status == 'running' and value == 'queued':
raise qdb.exceptions.QiitaDBStatusError(
"Cannot revert the status of a 'running' job to 'queued'")
new_status = qdb.util.convert_to_id(
value, "processing_job_status")
if (value in ('running', 'success', 'error') and
not self.command.analysis_only and
self.user.level == 'admin'):
subject = ('Job status change: %s (%s)' % (
self.command.name, self.id))
message = ('New status: %s' % (value))
qdb.util.send_email(self.user.email, subject, message)
sql = """UPDATE qiita.processing_job
SET processing_job_status_id = %s
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [new_status, self.id])
qdb.sql_connection.TRN.execute()
@property
def external_id(self):
"""Retrieves the external id"""
with qdb.sql_connection.TRN:
sql = """SELECT external_job_id
FROM qiita.processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
result = qdb.sql_connection.TRN.execute_fetchlast()
if result is None:
result = 'Not Available'
return result
@external_id.setter
def external_id(self, value):
"""Sets the external job id of the job
Parameters
----------
        value : str
            The id assigned to the job by the external job scheduler
"""
sql = """UPDATE qiita.processing_job
SET external_job_id = %s
WHERE processing_job_id = %s"""
qdb.sql_connection.perform_as_transaction(sql, [value, self.id])
@property
def release_validator_job(self):
"""Retrieves the release validator job
Returns
-------
qiita_db.processing_job.ProcessingJob or None
The release validator job of this job
"""
rvalidator = None
with qdb.sql_connection.TRN:
sql = """SELECT processing_job_id
FROM qiita.processing_job
WHERE command_id in (
SELECT command_id
FROM qiita.software_command
WHERE name = 'release_validators')
AND command_parameters->>'job' = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
results = qdb.sql_connection.TRN.execute_fetchflatten()
if results:
rvalidator = ProcessingJob(results[0])
return rvalidator
def submit(self, parent_job_id=None, dependent_jobs_list=None):
"""Submits the job to execution
This method has the ability to submit itself, as well as a list of
other ProcessingJob objects. If a list of ProcessingJob objects is
supplied, they will be submitted conditionally on the successful
execution of this object.
Users of this method don't need to set parent_job_id. It is used
internally by submit() for subsequent submit() calls for dependents.
Raises
------
QiitaDBOperationNotPermittedError
If the job is not in 'waiting' or 'in_construction' status
"""
with qdb.sql_connection.TRN:
status = self.status
if status not in {'in_construction', 'waiting'}:
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"Can't submit job, not in 'in_construction' or "
"'waiting' status. Current status: %s" % status)
self._set_status('queued')
# At this point we are going to involve other processes. We need
# to commit the changes to the DB or the other processes will not
# see these changes
qdb.sql_connection.TRN.commit()
job_dir = join(qdb.util.get_work_base_dir(), self.id)
software = self.command.software
plugin_start_script = software.start_script
plugin_env_script = software.environment_script
# Appending the portal URL so the job requests the information from the
# portal server that submitted the job
url = "%s%s" % (qiita_config.base_url, qiita_config.portal_dir)
# if the word ENVIRONMENT is in the plugin_env_script we have a special
# case where we are going to execute some command and then wait for the
# plugin to return their own id (first implemented for
# fast-bowtie2+woltka)
if 'ENVIRONMENT' in plugin_env_script:
            # the job has to be in running state so the plugin can change its
# status
with qdb.sql_connection.TRN:
self._set_status('running')
qdb.sql_connection.TRN.commit()
create_nested_path(job_dir)
cmd = (f'{plugin_env_script}; {plugin_start_script} '
f'{url} {self.id} {job_dir}')
stdout, stderr, return_value = _system_call(cmd)
if return_value != 0 or stderr != '':
self._set_error(stderr)
job_id = stdout
# note that dependent jobs, such as m validator jobs marshalled into
# n 'queues' require the job_id returned by an external scheduler such
# as Torque's MOAB, rather than a job name that can be defined within
# Qiita. Hence, this method must be able to handle the case where a job
# requires metadata from a late-defined and time-sensitive source.
elif qiita_config.plugin_launcher in ProcessingJob._launch_map:
launcher = ProcessingJob._launch_map[qiita_config.plugin_launcher]
if launcher['execute_in_process']:
# run this launcher function within this process.
# usually this is done if the launcher spawns other processes
# before returning immediately, usually with a job ID that can
# be used to monitor the job's progress.
resource_params = self.get_resource_allocation_info()
# note that parent_job_id is being passed transparently from
# submit declaration to the launcher.
# TODO: In proc launches should throw exceptions, that are
# handled by this code. Out of proc launches will need to
# handle exceptions by catching them and returning an error
# code.
job_id = launcher['function'](plugin_env_script,
plugin_start_script,
url,
self.id,
job_dir,
parent_job_id, resource_params)
if dependent_jobs_list:
# a dependent_jobs_list will always have at least one
# job
next_job = dependent_jobs_list.pop(0)
if not dependent_jobs_list:
# dependent_jobs_list is now empty
dependent_jobs_list = None
# The idea here is that a list of jobs is considered a
# chain. Each job in the chain is submitted with the job
                    # id of the job submitted before it; a job will only run if
# 'parent_job' ran successfully. Each iteration of submit()
# launches a job, pulls the next job from the list, and
# submits it. The remainder of the list is also passed to
# continue the process.
next_job.submit(parent_job_id=job_id,
dependent_jobs_list=dependent_jobs_list)
elif not launcher['execute_in_process']:
# run this launcher function as a new process.
# usually this is done if the launcher performs work that takes
# an especially long time, or waits for children who perform
# such work.
p = Process(target=launcher['function'],
args=(plugin_env_script,
plugin_start_script,
url,
self.id,
job_dir))
p.start()
job_id = p.pid
if dependent_jobs_list:
# for now, treat dependents as independent when
# running locally. This means they will not be
# organized into n 'queues' or 'chains', and
# will all run simultaneously.
for dependent in dependent_jobs_list:
p = Process(target=launcher['function'],
args=(plugin_env_script,
plugin_start_script,
url,
                                          dependent.id,
job_dir))
p.start()
else:
                error = ("execute_in_process must be defined "
                         "as either true or false")
raise AssertionError(error)
else:
error = "plugin_launcher should be one of two values for now"
raise AssertionError(error)
# note that at this point, self.id is Qiita's UUID for a Qiita
# job. job_id at this point is an external ID (e.g. Torque Job
# ID). Record the mapping between job_id and self.id using
# external_id.
if job_id is not None:
self.external_id = job_id
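    # Usage sketch of the dependent-job chaining handled above (objects and
    # counts are hypothetical):
    #
    #   lead, v1, v2 = validator_jobs       # three ProcessingJob objects
    #   lead.submit(dependent_jobs_list=[v1, v2])
    #
    # With an in-process launcher, submit() launches `lead`, then calls
    # v1.submit(parent_job_id=<lead's scheduler id>,
    #           dependent_jobs_list=[v2]), so each job in the chain is
    # submitted conditionally on the one before it; with the local Process
    # launcher the dependents are simply started in parallel (see above).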
def release(self):
"""Releases the job from the waiting status and creates the artifact
Returns
-------
dict of {int: int}
The mapping between the job output and the artifact
"""
with qdb.sql_connection.TRN:
if self.command.software.type != 'artifact definition':
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"Only artifact definition jobs can be released")
# Retrieve the artifact information from the DB
sql = """SELECT artifact_info
FROM qiita.processing_job_validator
WHERE validator_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
a_info = qdb.sql_connection.TRN.execute_fetchlast()
provenance = loads(self.parameters.values['provenance'])
job = ProcessingJob(provenance['job'])
if 'data_type' in a_info:
# This job is resulting from a private job
parents = None
params = None
cmd_out_id = None
name = None
data_type = a_info['data_type']
analysis = qdb.analysis.Analysis(
job.parameters.values['analysis'])
a_info = a_info['artifact_data']
else:
# This job is resulting from a plugin job
parents = job.input_artifacts
params = job.parameters
cmd_out_id = provenance['cmd_out_id']
name = provenance['name']
analysis = None
data_type = None
# Create the artifact
atype = a_info['artifact_type']
filepaths = a_info['filepaths']
a = qdb.artifact.Artifact.create(
filepaths, atype, parents=parents,
processing_parameters=params,
analysis=analysis, data_type=data_type, name=name)
self._set_status('success')
mapping = {}
if cmd_out_id is not None:
mapping = {cmd_out_id: a.id}
return mapping
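    # For reference, the `artifact_info` payload consumed above can take two
    # shapes, depending on how the validator was created (values below are
    # hypothetical examples, not real data):
    #
    #   {'filepaths': [['seqs.biom', 'biom']], 'artifact_type': 'BIOM'}
    #   {'data_type': '16S',
    #    'artifact_data': {'filepaths': [['seqs.biom', 'biom']],
    #                      'artifact_type': 'BIOM'}}
    #
    # The second shape is the one written by _complete_artifact_definition
    # when the provenance carries a 'data_type'.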
def release_validators(self):
"""Allows all the validator job spawned by this job to complete"""
if self.command.software.type not in ('artifact transformation',
'private'):
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"Only artifact transformation and private jobs can "
"release validators")
# Check if all the validators are completed. Validator jobs can be
# in two states when completed: 'waiting' in case of success
# or 'error' otherwise
validator_ids = ['%s [%s]' % (j.id, j.external_id)
for j in self.validator_jobs
if j.status not in ['waiting', 'error']]
# Active polling - wait until all validator jobs are completed
# TODO: As soon as we see one errored validator, we should kill
# the other jobs and exit early. Don't wait for all of the jobs
# to complete.
while validator_ids:
jids = ', '.join(validator_ids)
self.step = ("Validating outputs (%d remaining) via "
"job(s) %s" % (len(validator_ids), jids))
sleep(10)
validator_ids = ['%s [%s]' % (j.id, j.external_id)
for j in self.validator_jobs
if j.status not in ['waiting', 'error']]
# Check if any of the validators errored
errored = [j for j in self.validator_jobs
if j.status == 'error']
if errored:
# At least one of the validators failed, Set the rest of the
# validators and the current job as failed
waiting = [j.id for j in self.validator_jobs
if j.status == 'waiting']
common_error = "\n".join(
["Validator %s error message: %s" % (j.id, j.log.msg)
for j in errored])
val_error = "%d sister validator jobs failed: %s" % (
len(errored), common_error)
for j in waiting:
ProcessingJob(j)._set_error(val_error)
self._set_error('%d validator jobs failed: %s'
% (len(errored), common_error))
else:
mapping = {}
# Loop through all validator jobs and release them, allowing
# to create the artifacts. Note that if any artifact creation
# fails, the rollback operation will make sure that the
# previously created artifacts are not in there
for vjob in self.validator_jobs:
mapping.update(vjob.release())
if mapping:
sql = """INSERT INTO
qiita.artifact_output_processing_job
(artifact_id, processing_job_id,
command_output_id)
VALUES (%s, %s, %s)"""
sql_args = [[aid, self.id, outid]
for outid, aid in mapping.items()]
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add(sql, sql_args, many=True)
self._update_and_launch_children(mapping)
self._set_status('success')
def _complete_artifact_definition(self, artifact_data):
""""Performs the needed steps to complete an artifact definition job
In order to complete an artifact definition job we need to create
the artifact, and then start all the jobs that were waiting for this
artifact to be created. Note that each artifact definition job creates
one and only one artifact.
Parameters
----------
artifact_data : {'filepaths': list of (str, str), 'artifact_type': str}
Dict with the artifact information. `filepaths` contains the list
of filepaths and filepath types for the artifact and
`artifact_type` the type of the artifact
Notes
-----
The `provenance` in the job.parameters can contain a `direct_creation`
flag to avoid having to wait for the complete job to create a new
        artifact, which is normally run during regular processing. Skipping is
fine because we are adding an artifact to an existing job outside of
regular processing
"""
with qdb.sql_connection.TRN:
atype = artifact_data['artifact_type']
filepaths = artifact_data['filepaths']
# We need to differentiate if this artifact is the
# result of a previous job or uploading
job_params = self.parameters.values
if job_params['provenance'] is not None:
# The artifact is a result from a previous job
provenance = loads(job_params['provenance'])
if provenance.get('direct_creation', False):
original_job = ProcessingJob(provenance['job'])
artifact = qdb.artifact.Artifact.create(
filepaths, atype,
parents=original_job.input_artifacts,
processing_parameters=original_job.parameters,
analysis=job_params['analysis'],
name=job_params['name'])
sql = """
INSERT INTO qiita.artifact_output_processing_job
(artifact_id, processing_job_id,
command_output_id)
VALUES (%s, %s, %s)"""
qdb.sql_connection.TRN.add(
sql, [artifact.id, original_job.id,
provenance['cmd_out_id']])
qdb.sql_connection.TRN.execute()
self._set_status('success')
else:
if provenance.get('data_type') is not None:
artifact_data = {'data_type': provenance['data_type'],
'artifact_data': artifact_data}
sql = """UPDATE qiita.processing_job_validator
SET artifact_info = %s
WHERE validator_id = %s"""
qdb.sql_connection.TRN.add(
sql, [dumps(artifact_data), self.id])
qdb.sql_connection.TRN.execute()
# Can't create the artifact until all validators
# are completed
self._set_status('waiting')
else:
# The artifact is uploaded by the user or is the initial
# artifact of an analysis
if ('analysis' in job_params and
job_params['analysis'] is not None):
pt = None
an = qdb.analysis.Analysis(job_params['analysis'])
sql = """SELECT data_type
FROM qiita.analysis_processing_job
WHERE analysis_id = %s
AND processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [an.id, self.id])
data_type = qdb.sql_connection.TRN.execute_fetchlast()
else:
pt = qdb.metadata_template.prep_template.PrepTemplate(
job_params['template'])
an = None
data_type = None
qdb.artifact.Artifact.create(
filepaths, atype, prep_template=pt, analysis=an,
data_type=data_type, name=job_params['name'])
self._set_status('success')
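    # Sketch of the 'provenance' parameter this method consumes (hypothetical
    # values; the keys mirror what _complete_artifact_transformation builds):
    #
    #   {'job': '<uuid of the generating job>',
    #    'cmd_out_id': 3,
    #    'name': 'demultiplexed',
    #    'direct_creation': False,    # optional, see Notes above
    #    'data_type': '16S'}          # optional, only checked when present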
def _complete_artifact_transformation(self, artifacts_data):
"""Performs the needed steps to complete an artifact transformation job
In order to complete an artifact transformation job, we need to create
a validate job for each artifact output and submit it.
Parameters
----------
artifacts_data : dict of dicts
The generated artifact information keyed by output name.
The format of each of the internal dictionaries must be
{'filepaths': list of (str, str), 'artifact_type': str}
where `filepaths` contains the list of filepaths and filepath types
for the artifact and `artifact_type` the type of the artifact
Raises
------
QiitaDBError
If there is more than one prep information attached to the new
artifact
"""
validator_jobs = []
with qdb.sql_connection.TRN:
cmd_id = self.command.id
for out_name, a_data in artifacts_data.items():
# Correct the format of the filepaths parameter so we can
# create a validate job
filepaths = defaultdict(list)
for fp, fptype in a_data['filepaths']:
filepaths[fptype].append(fp)
atype = a_data['artifact_type']
# The validate job needs a prep information file. In theory,
                # a job can be generated from more than one prep information
                # file, so we check here if we have one or more templates. At
                # this moment, if we allow more than one template, there is a
# fair amount of changes that need to be done on the plugins,
# so we are going to restrict the number of templates to one.
# Note that at this moment there is no way of generating an
# artifact from 2 or more artifacts, so we can impose this
# limitation now and relax it later.
templates = set()
for artifact in self.input_artifacts:
templates.update(pt.id for pt in artifact.prep_templates)
template = None
analysis = None
if len(templates) > 1:
raise qdb.exceptions.QiitaDBError(
"Currently only single prep template "
"is allowed, found %d" % len(templates))
elif len(templates) == 1:
template = templates.pop()
else:
# In this case we have 0 templates. What this means is that
# this artifact is being generated in the analysis pipeline
# All the artifacts included in the analysis pipeline
# belong to the same analysis, so we can just ask the
# first artifact for the analysis that it belongs to
analysis = self.input_artifacts[0].analysis.id
# Once the validate job completes, it needs to know if it has
# been generated from a command (and how) or if it has been
# uploaded. In order to differentiate these cases, we populate
# the provenance parameter with some information about the
# current job and how this artifact has been generated. This
# does not affect the plugins since they can ignore this
# parameter
sql = """SELECT command_output_id
FROM qiita.command_output
WHERE name = %s AND command_id = %s"""
qdb.sql_connection.TRN.add(sql, [out_name, cmd_id])
cmd_out_id = qdb.sql_connection.TRN.execute_fetchlast()
naming_params = self.command.naming_order
if naming_params:
params = self.parameters.values
art_name = "%s %s" % (
out_name, ' '.join([str(params[p])
for p in naming_params]))
else:
art_name = out_name
provenance = {'job': self.id,
'cmd_out_id': cmd_out_id,
'name': art_name}
# Get the validator command for the current artifact type and
# create a new job
# see also release_validators()
cmd = qdb.software.Command.get_validator(atype)
values_dict = {
'files': dumps(filepaths), 'artifact_type': atype,
'template': template, 'provenance': dumps(provenance),
'analysis': None}
if analysis is not None:
values_dict['analysis'] = analysis
validate_params = qdb.software.Parameters.load(
cmd, values_dict=values_dict)
validator_jobs.append(
ProcessingJob.create(self.user, validate_params, True))
# Change the current step of the job
self.step = "Validating outputs (%d remaining) via job(s) %s" % (
len(validator_jobs), ', '.join(['%s [%s]' % (
j.id, j.external_id) for j in validator_jobs]))
# Link all the validator jobs with the current job
self._set_validator_jobs(validator_jobs)
# Submit m validator jobs as n lists of jobs
n = qiita_config.trq_dependency_q_cnt
# taken from:
# https://www.geeksforgeeks.org/break-list-chunks-size-n-python/
lists = [validator_jobs[i * n:(i + 1) * n]
for i in range((len(validator_jobs) + n - 1) // n)]
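            # Worked example of the chunking above (hypothetical sizes): with
            # 7 validator jobs and n = 3, range((7 + 3 - 1) // 3) == range(3),
            # so `lists` becomes [jobs[0:3], jobs[3:6], jobs[6:9]] -- two full
            # chains of 3 and one chain with the remaining job.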
for sub_list in lists:
# each sub_list will always have at least a lead_job
lead_job = sub_list.pop(0)
if not sub_list:
# sub_list is now empty
sub_list = None
lead_job.submit(dependent_jobs_list=sub_list)
# Submit the job that will release all the validators
plugin = qdb.software.Software.from_name_and_version(
'Qiita', 'alpha')
cmd = plugin.get_command('release_validators')
params = qdb.software.Parameters.load(
cmd, values_dict={'job': self.id})
job = ProcessingJob.create(self.user, params)
# Doing the submission outside of the transaction
job.submit()
def _set_validator_jobs(self, validator_jobs):
"""Sets the validator jobs for the current job
Parameters
----------
validator_jobs : list of ProcessingJob
The validator_jobs for the current job
"""
with qdb.sql_connection.TRN:
sql = """INSERT INTO qiita.processing_job_validator
(processing_job_id, validator_id)
VALUES (%s, %s)"""
sql_args = [[self.id, j.id] for j in validator_jobs]
qdb.sql_connection.TRN.add(sql, sql_args, many=True)
qdb.sql_connection.TRN.execute()
def complete(self, success, artifacts_data=None, error=None):
"""Completes the job, either with a success or error status
Parameters
----------
success : bool
Whether the job has completed successfully or not
artifacts_data : dict of dicts, optional
The generated artifact information keyed by output name.
The format of each of the internal dictionaries must be
{'filepaths': list of (str, str), 'artifact_type': str}
where `filepaths` contains the list of filepaths and filepath types
for the artifact and `artifact_type` the type of the artifact
error : str, optional
If the job was not successful, the error message
Raises
------
qiita_db.exceptions.QiitaDBOperationNotPermittedError
If the job is not in running state
"""
with qdb.sql_connection.TRN:
if success:
if self.status != 'running':
# If the job is not running, we only allow to complete it
# if it did not succeed
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"Can't complete job: not in a running state")
if artifacts_data:
if self.command.software.type == 'artifact definition':
# There is only one artifact created
_, a_data = artifacts_data.popitem()
self._complete_artifact_definition(a_data)
else:
self._complete_artifact_transformation(artifacts_data)
else:
self._set_status('success')
else:
self._set_error(error)
@property
def log(self):
"""The log entry attached to the job if it failed
Returns
-------
qiita_db.logger.LogEntry or None
If the status of the job is `error`, returns the LogEntry attached
to the job
"""
with qdb.sql_connection.TRN:
res = None
if self.status == 'error':
sql = """SELECT logging_id
FROM qiita.processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
log_id = qdb.sql_connection.TRN.execute_fetchlast()
res = qdb.logger.LogEntry(log_id)
return res
def _set_error(self, error):
"""Attaches a log entry to the job
Parameters
----------
error : str
The error message
Raises
------
qiita_db.exceptions.QiitaDBOperationNotPermittedError
If the status of the job is 'success'
"""
with qdb.sql_connection.TRN:
if self.status == 'success':
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"Can only set up the log for jobs whose status is 'error'")
self._set_status('error')
log = qdb.logger.LogEntry.create('Runtime', error)
sql = """UPDATE qiita.processing_job
SET logging_id = %s
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [log.id, self.id])
qdb.sql_connection.TRN.execute()
# All the children should be marked as failure
for c in self.children:
c.complete(False, error="Parent job '%s' failed." % self.id)
@property
def heartbeat(self):
"""The timestamp of the last heartbeat received from the job
Returns
-------
datetime
The last heartbeat timestamp
"""
with qdb.sql_connection.TRN:
sql = """SELECT heartbeat
FROM qiita.processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
def update_heartbeat_state(self):
"""Updates the heartbeat of the job
In case that the job is in `queued` status, it changes the status to
`running`.
Raises
------
QiitaDBOperationNotPermittedError
If the job is already completed
"""
with qdb.sql_connection.TRN:
status = self.status
if status == 'queued':
self._set_status('running')
elif status != 'running':
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"Can't execute heartbeat on job: already completed")
sql = """UPDATE qiita.processing_job
SET heartbeat = %s
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [datetime.now(), self.id])
qdb.sql_connection.TRN.execute()
@property
def step(self):
"""Returns the current step of the job
Returns
-------
str
The current step of the job
"""
with qdb.sql_connection.TRN:
sql = """SELECT step
FROM qiita.processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
@step.setter
def step(self, value):
"""Sets the current step of the job
Parameters
----------
value : str
The new current step of the job
Raises
------
qiita_db.exceptions.QiitaDBOperationNotPermittedError
If the status of the job is not 'running'
"""
if self.status != 'running':
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"Cannot change the step of a job whose status is not "
"'running'")
sql = """UPDATE qiita.processing_job
SET step = %s
WHERE processing_job_id = %s"""
qdb.sql_connection.perform_as_transaction(sql, [value, self.id])
@property
def children(self):
"""The children jobs
Returns
-------
generator of qiita_db.processing_job.ProcessingJob
The children jobs
"""
with qdb.sql_connection.TRN:
sql = """SELECT child_id
FROM qiita.parent_processing_job
WHERE parent_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
for jid in qdb.sql_connection.TRN.execute_fetchflatten():
yield ProcessingJob(jid)
@property
def validator_jobs(self):
"""The validators of this job
Returns
-------
generator of qiita_db.processing_job.ProcessingJob
The validators of this job
"""
with qdb.sql_connection.TRN:
sql = """SELECT validator_id
FROM qiita.processing_job_validator pjv
JOIN qiita.processing_job pj
ON pjv.validator_id = pj.processing_job_id
JOIN qiita.processing_job_status USING (
processing_job_status_id)
WHERE pjv.processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
for jid in qdb.sql_connection.TRN.execute_fetchflatten():
yield ProcessingJob(jid)
def _update_children(self, mapping):
"""Updates the children of the current job to populate the input params
Parameters
----------
mapping : dict of {int: int}
The mapping between output parameter and artifact
Returns
-------
list of qiita_db.processing_job.ProcessingJob
            The list of children that are ready to be submitted
"""
ready = []
with qdb.sql_connection.TRN:
sql = """SELECT command_output_id, name
FROM qiita.command_output
WHERE command_output_id IN %s"""
sql_args = [tuple(mapping.keys())]
qdb.sql_connection.TRN.add(sql, sql_args)
res = qdb.sql_connection.TRN.execute_fetchindex()
new_map = {name: mapping[oid] for oid, name in res}
sql = """SELECT command_parameters, pending
FROM qiita.processing_job
WHERE processing_job_id = %s"""
sql_update = """UPDATE qiita.processing_job
SET command_parameters = %s,
pending = %s
WHERE processing_job_id = %s"""
sql_link = """INSERT INTO qiita.artifact_processing_job
(artifact_id, processing_job_id)
VALUES (%s, %s)"""
for c in self.children:
qdb.sql_connection.TRN.add(sql, [c.id])
params, pending = qdb.sql_connection.TRN.execute_fetchflatten()
for pname, out_name in pending[self.id].items():
a_id = new_map[out_name]
params[pname] = str(a_id)
del pending[self.id]
# Link the input artifact with the child job
qdb.sql_connection.TRN.add(sql_link, [a_id, c.id])
# Force to insert a NULL in the DB if pending is empty
pending = pending if pending else None
qdb.sql_connection.TRN.add(sql_update,
[dumps(params), pending, c.id])
qdb.sql_connection.TRN.execute()
if pending is None:
# The child already has all the parameters
# Add it to the ready list
ready.append(c)
return ready
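    # Illustration of the structures handled above (all values hypothetical):
    #
    #   mapping  = {3: 42}                    # command_output_id -> artifact id
    #   new_map  = {'demultiplexed': 42}      # output name -> artifact id
    #   pending  = {'<this job id>': {'input_data': 'demultiplexed'}}
    #
    # For a child with that `pending` entry, its 'input_data' parameter is set
    # to '42', artifact 42 is linked as its input and, if nothing else is
    # pending, the child is returned as ready to submit.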
def _update_and_launch_children(self, mapping):
"""Updates the children of the current job to populate the input params
Parameters
----------
mapping : dict of {int: int}
The mapping between output parameter and artifact
"""
ready = self._update_children(mapping)
# Submit all the children that already have all the input parameters
for c in ready:
c.submit()
# some jobs create several children jobs/validators and this can
# clog the submission process; giving it a second to avoid this
sleep(1)
@property
def outputs(self):
"""The outputs of the job
Returns
-------
dict of {str: qiita_db.artifact.Artifact}
The outputs of the job keyed by output name
"""
with qdb.sql_connection.TRN:
if self.status != 'success':
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"Can't return the outputs of a non-success job")
sql = """SELECT artifact_id, name
FROM qiita.artifact_output_processing_job
JOIN qiita.command_output USING (command_output_id)
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return {
name: qdb.artifact.Artifact(aid)
for aid, name in qdb.sql_connection.TRN.execute_fetchindex()}
@property
def processing_job_workflow(self):
"""The processing job workflow
Returns
-------
ProcessingWorkflow
            The processing job workflow the job belongs to
"""
with qdb.sql_connection.TRN:
# Retrieve the workflow root jobs
sql = """SELECT get_processing_workflow_roots
FROM qiita.get_processing_workflow_roots(%s)"""
qdb.sql_connection.TRN.add(sql, [self.id])
res = qdb.sql_connection.TRN.execute_fetchindex()
if res:
sql = """SELECT processing_job_workflow_id
FROM qiita.processing_job_workflow_root
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [res[0][0]])
r = qdb.sql_connection.TRN.execute_fetchindex()
return (qdb.processing_job.ProcessingWorkflow(r[0][0]) if r
else None)
else:
return None
@property
def pending(self):
"""A dictionary with the information about the predecessor jobs
Returns
-------
dict
A dict with {job_id: {parameter_name: output_name}}"""
with qdb.sql_connection.TRN:
sql = """SELECT pending
FROM qiita.processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
res = qdb.sql_connection.TRN.execute_fetchlast()
return res if res is not None else {}
@property
def hidden(self):
"""Whether the job is hidden or not
Returns
-------
bool
Whether the jobs is hidden or not
"""
with qdb.sql_connection.TRN:
sql = """SELECT hidden
FROM qiita.processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
def hide(self):
"""Hides the job from the user
Raises
------
QiitaDBOperationNotPermittedError
If the job is not in the error status
"""
with qdb.sql_connection.TRN:
status = self.status
if status != 'error':
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
'Only jobs in error status can be hidden. Current status: '
'%s' % status)
sql = """UPDATE qiita.processing_job
SET hidden = %s
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [True, self.id])
qdb.sql_connection.TRN.execute()
@property
def shape(self):
"""Number of samples, metadata columns and input size of this job
Returns
-------
int, int, int
Number of samples, metadata columns and input size. None means it
couldn't be calculated
"""
samples = None
columns = None
study_id = None
analysis_id = None
artifact = None
input_size = None
parameters = self.parameters.values
if self.command.name == 'Validate':
            # Validate only has two options to calculate its size: template (a
            # job that has a preparation linked) or analysis (the job comes
            # from an analysis).
if 'template' in parameters:
try:
pt = qdb.metadata_template.prep_template.PrepTemplate(
parameters['template'])
except qdb.exceptions.QiitaDBUnknownIDError:
pass
else:
study_id = pt.study_id
elif 'analysis' in parameters:
analysis_id = parameters['analysis']
elif self.command.name == 'build_analysis_files':
# build analysis is a special case because the analysis doesn't
# exist yet
sanalysis = qdb.analysis.Analysis(parameters['analysis']).samples
samples = sum([len(sams) for sams in sanalysis.values()])
input_size = sum([fp['fp_size'] for aid in sanalysis
for fp in qdb.artifact.Artifact(aid).filepaths])
elif self.command.software.name == 'Qiita':
if 'study' in parameters:
study_id = parameters['study']
elif 'study_id' in parameters:
study_id = parameters['study_id']
elif 'analysis' in parameters:
analysis_id = parameters['analysis']
elif 'analysis_id' in parameters:
analysis_id = parameters['analysis_id']
elif 'artifact' in parameters:
try:
artifact = qdb.artifact.Artifact(parameters['artifact'])
except qdb.exceptions.QiitaDBUnknownIDError:
pass
elif self.input_artifacts:
artifact = self.input_artifacts[0]
input_size = sum([fp['fp_size'] for a in self.input_artifacts
for fp in a.filepaths])
# if there is an artifact, then we need to get the study_id/analysis_id
if artifact is not None:
if artifact.study is not None:
study_id = artifact.study.id
elif artifact.analysis is not None:
analysis_id = artifact.analysis.id
# now retrieve the sample/columns based on study_id/analysis_id
if study_id is not None:
try:
st = qdb.study.Study(study_id).sample_template
except qdb.exceptions.QiitaDBUnknownIDError:
pass
else:
samples = len(st)
columns = len(st.categories())
elif analysis_id is not None:
try:
analysis = qdb.analysis.Analysis(analysis_id)
except qdb.exceptions.QiitaDBUnknownIDError:
pass
else:
mfp = qdb.util.get_filepath_information(
analysis.mapping_file)['fullpath']
samples, columns = pd.read_csv(
mfp, sep='\t', dtype=str).shape
input_size = sum([fp['fp_size'] for aid in analysis.samples for
fp in qdb.artifact.Artifact(aid).filepaths])
return samples, columns, input_size
class ProcessingWorkflow(qdb.base.QiitaObject):
"""Models a workflow defined by the user
Parameters
----------
user : qiita_db.user.User
The user that modeled the workflow
root : list of qiita_db.processing_job.ProcessingJob
The first job in the workflow
"""
_table = "processing_job_workflow"
@classmethod
def _common_creation_steps(cls, user, root_jobs, name=None):
"""Executes the common creation steps
Parameters
----------
user : qiita_db.user.User
The user creating the workflow
root_jobs : list of qiita_db.processing_job.ProcessingJob
The root jobs of the workflow
name : str, optional
The name of the workflow. Default: generated from user's name
"""
with qdb.sql_connection.TRN:
# Insert the workflow in the processing_job_workflow table
name = name if name else "%s's workflow" % user.info['name']
sql = """INSERT INTO qiita.processing_job_workflow (email, name)
VALUES (%s, %s)
RETURNING processing_job_workflow_id"""
qdb.sql_connection.TRN.add(sql, [user.email, name])
w_id = qdb.sql_connection.TRN.execute_fetchlast()
            # Connect the workflow with its initial set of jobs
sql = """INSERT INTO qiita.processing_job_workflow_root
(processing_job_workflow_id, processing_job_id)
VALUES (%s, %s)"""
sql_args = [[w_id, j.id] for j in root_jobs]
qdb.sql_connection.TRN.add(sql, sql_args, many=True)
qdb.sql_connection.TRN.execute()
return cls(w_id)
@classmethod
def from_default_workflow(cls, user, dflt_wf, req_params, name=None,
force=False):
"""Creates a new processing workflow from a default workflow
Parameters
----------
user : qiita_db.user.User
The user creating the workflow
dflt_wf : qiita_db.software.DefaultWorkflow
The default workflow
req_params : dict of {qdb.software.Command: dict of {str: object}}
The required parameters values for the source commands in the
workflow, keyed by command. The inner dicts are keyed by
parameter name.
name : str, optional
Name of the workflow. Default: generated from user's name
force : bool
Force creation on duplicated parameters
Returns
-------
qiita_db.processing_job.ProcessingWorkflow
The newly created workflow
"""
with qdb.sql_connection.TRN:
dflt_g = dflt_wf.graph
# Find the roots of the workflow. That is, the nodes that do not
# have a parent in the graph (in_degree = 0)
in_degrees = dflt_g.in_degree()
            # We can potentially access this information from the nodes
            # multiple times, so caching it in here.
            # in_degree() yields (node, degree) pairs; a degree of 0 marks a
            # root node
all_nodes = {}
roots = {}
for node, position in in_degrees:
if position == 0:
roots[node] = (node.command, node.parameters)
all_nodes[node] = (node.command, node.parameters)
# Check that we have all the required parameters
root_cmds = set(c for c, _ in roots.values())
if root_cmds != set(req_params):
error_msg = ['Provided required parameters do not match the '
'initial set of commands for the workflow.']
missing = [c.name for c in root_cmds - set(req_params)]
if missing:
error_msg.append(
' Command(s) "%s" are missing the required parameter '
'set.' % ', '.join(missing))
extra = [c.name for c in set(req_params) - root_cmds]
if extra:
error_msg.append(
                        ' Parameters for command(s) "%s" have been provided, '
'but they are not the initial commands for the '
'workflow.' % ', '.join(extra))
raise qdb.exceptions.QiitaDBError(''.join(error_msg))
# Start creating the root jobs
node_to_job = {
n: ProcessingJob.create(
user,
qdb.software.Parameters.from_default_params(
p, req_params[c]), force)
for n, (c, p) in roots.items()}
root_jobs = node_to_job.values()
# SQL used to create the edges between jobs
sql = """INSERT INTO qiita.parent_processing_job
(parent_id, child_id)
VALUES (%s, %s)"""
            # Create the rest of the jobs. These are different from the root
# jobs because they depend on other jobs to complete in order to be
# submitted
for n in nx.topological_sort(dflt_g):
if n in node_to_job:
# We have already visited this node
# (because it is a root node)
continue
cmd, dflt_params = all_nodes[n]
job_req_params = {}
parent_ids = []
# Each incoming edge represents an artifact that is generated
# by the source job of the edge
for source, dest, data in dflt_g.in_edges(n, data=True):
# Retrieve the id of the parent job - it already exists
# because we are visiting the nodes in topological order
source_id = node_to_job[source].id
parent_ids.append(source_id)
# Get the connections between the job and the source
connections = data['connections'].connections
for out, in_param in connections:
# We take advantage of the fact the parameters are
# stored in JSON to encode the name of the output
# artifact from the previous job
job_req_params[in_param] = [source_id, out]
                # At this point we should have all the required parameters for
# the current job, so create it
new_job = ProcessingJob.create(
user, qdb.software.Parameters.from_default_params(
dflt_params, job_req_params), force)
node_to_job[n] = new_job
# Create the parent-child links in the DB
sql_args = [[pid, new_job.id] for pid in parent_ids]
qdb.sql_connection.TRN.add(sql, sql_args, many=True)
return cls._common_creation_steps(user, root_jobs, name)
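    # A minimal usage sketch (user email, workflow id and parameter names are
    # hypothetical, for illustration only):
    #
    #   user = qdb.user.User('demo@example.com')
    #   dflt_wf = qdb.software.DefaultWorkflow(1)
    #   root_cmd = ...  # the Command of the workflow's root node
    #   wf = ProcessingWorkflow.from_default_workflow(
    #       user, dflt_wf, {root_cmd: {'input_data': 5}}, name='my workflow')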
@classmethod
def from_scratch(cls, user, parameters, name=None, force=False):
"""Creates a new processing workflow from scratch
Parameters
----------
user : qiita_db.user.User
The user creating the workflow
parameters : qiita_db.software.Parameters
The parameters of the first job in the workflow
name : str, optional
Name of the workflow. Default: generated from user's name
force : bool
Force creation on duplicated parameters
Returns
-------
qiita_db.processing_job.ProcessingWorkflow
The newly created workflow
"""
job = ProcessingJob.create(user, parameters, force)
return cls._common_creation_steps(user, [job], name)
@property
def name(self):
""""The name of the workflow
Returns
-------
str
The name of the workflow
"""
with qdb.sql_connection.TRN:
sql = """SELECT name
FROM qiita.processing_job_workflow
WHERE processing_job_workflow_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
@property
def user(self):
"""The user that created the workflow
Returns
-------
qdb.user.User
The user that created the workflow
"""
with qdb.sql_connection.TRN:
sql = """SELECT email
FROM qiita.processing_job_workflow
WHERE processing_job_workflow_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
email = qdb.sql_connection.TRN.execute_fetchlast()
return qdb.user.User(email)
@property
def graph(self):
"""Returns the graph of jobs that represent the workflow
Returns
-------
networkx.DiGraph
The graph representing the workflow
"""
g = nx.DiGraph()
with qdb.sql_connection.TRN:
# Retrieve all graph workflow nodes
sql = """SELECT parent_id, child_id
FROM qiita.get_processing_workflow_edges(%s)"""
qdb.sql_connection.TRN.add(sql, [self.id])
edges = qdb.sql_connection.TRN.execute_fetchindex()
nodes = {}
if edges:
nodes = {jid: ProcessingJob(jid)
for jid in set(chain.from_iterable(edges))}
edges = [(nodes[s], nodes[d]) for s, d in edges]
g.add_edges_from(edges)
            # It is possible that there are root jobs that don't have any
            # children, so they do not appear in the edge list
sql = """SELECT processing_job_id
FROM qiita.processing_job_workflow_root
WHERE processing_job_workflow_id = %s"""
sql_args = [self.id]
if nodes:
sql += " AND processing_job_id NOT IN %s"
sql_args.append(tuple(nodes))
qdb.sql_connection.TRN.add(sql, sql_args)
nodes = [
ProcessingJob(jid)
for jid in qdb.sql_connection.TRN.execute_fetchflatten()]
g.add_nodes_from(nodes)
return g
def _raise_if_not_in_construction(self):
"""Raises an error if the workflow is not in construction
Raises
------
qiita_db.exceptions.QiitaDBOperationNotPermittedError
If the workflow is not in construction
"""
with qdb.sql_connection.TRN:
# To know if the workflow is in construction or not it suffices
# to look at the status of the root jobs
sql = """SELECT DISTINCT processing_job_status
FROM qiita.processing_job_workflow_root
JOIN qiita.processing_job USING (processing_job_id)
JOIN qiita.processing_job_status
USING (processing_job_status_id)
WHERE processing_job_workflow_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
res = qdb.sql_connection.TRN.execute_fetchflatten()
            # If the above SQL query returns a single element and the value
            # is different from 'in_construction', it means that all the jobs
            # in the workflow are in the same status and it is not
            # 'in_construction', hence raise the error. If the above SQL query
            # returns more than one value (len(res) > 1), it means that the
            # workflow is no longer in construction because some jobs have
            # been submitted for processing. Note that if the above query
            # doesn't return any value, it means that there are no jobs in the
            # workflow and hence the workflow is in construction.
if (len(res) == 1 and res[0] != 'in_construction') or len(res) > 1:
# The workflow is no longer in construction, raise an error
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"Workflow not in construction")
def add(self, dflt_params, connections=None, req_params=None,
opt_params=None, force=False):
"""Adds a new job to the workflow
Parameters
----------
dflt_params : qiita_db.software.DefaultParameters
The DefaultParameters object used
connections : dict of {qiita_db.processing_job.ProcessingJob:
{str: str}}, optional
Dictionary keyed by the jobs in which the new job depends on,
and values is a dict mapping between source outputs and new job
inputs
req_params : dict of {str: object}, optional
Any extra required parameter values, keyed by parameter name.
            Default: None, all the required parameters are provided through
the `connections` dictionary
opt_params : dict of {str: object}, optional
The optional parameters to change from the default set, keyed by
parameter name. Default: None, use the values in `dflt_params`
force : bool
Force creation on duplicated parameters
Raises
------
qiita_db.exceptions.QiitaDBOperationNotPermittedError
If the workflow is not in construction
"""
with qdb.sql_connection.TRN:
self._raise_if_not_in_construction()
if connections:
# The new Job depends on previous jobs in the workflow
req_params = req_params if req_params else {}
# Loop through all the connections to add the relevant
# parameters
for source, mapping in connections.items():
source_id = source.id
for out, in_param in mapping.items():
req_params[in_param] = [source_id, out]
new_job = ProcessingJob.create(
self.user, qdb.software.Parameters.from_default_params(
dflt_params, req_params, opt_params=opt_params), force)
# SQL used to create the edges between jobs
sql = """INSERT INTO qiita.parent_processing_job
(parent_id, child_id)
VALUES (%s, %s)"""
sql_args = [[s.id, new_job.id] for s in connections]
qdb.sql_connection.TRN.add(sql, sql_args, many=True)
qdb.sql_connection.TRN.execute()
else:
# The new job doesn't depend on any previous job in the
# workflow, so it is a new root job
new_job = ProcessingJob.create(
self.user, qdb.software.Parameters.from_default_params(
dflt_params, req_params, opt_params=opt_params), force)
sql = """INSERT INTO qiita.processing_job_workflow_root
(processing_job_workflow_id, processing_job_id)
VALUES (%s, %s)"""
sql_args = [self.id, new_job.id]
qdb.sql_connection.TRN.add(sql, sql_args)
qdb.sql_connection.TRN.execute()
return new_job
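    # Sketch of how `connections` maps a parent job's output to the new job's
    # required input (output/parameter names are hypothetical):
    #
    #   wf.add(dflt_params,
    #          connections={parent_job: {'demultiplexed': 'input_data'}})
    #
    # which ends up storing req_params['input_data'] = [parent_job.id,
    # 'demultiplexed'] before the new job is created.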
def remove(self, job, cascade=False):
"""Removes a given job from the workflow
Parameters
----------
job : qiita_db.processing_job.ProcessingJob
The job to be removed
cascade : bool, optional
            If True, also remove the given job's children. Default: False.
Raises
------
qiita_db.exceptions.QiitaDBOperationNotPermittedError
If the workflow is not in construction
If the job to be removed has children and `cascade` is `False`
"""
with qdb.sql_connection.TRN:
self._raise_if_not_in_construction()
# Check if the given job has children
children = list(job.children)
if children:
if not cascade:
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"Can't remove job '%s': it has children" % job.id)
else:
                    # We need to remove all the job's children first,
                    # and then remove the current job
for c in children:
self.remove(c, cascade=True)
# Remove any edges (it can only appear as a child)
sql = """DELETE FROM qiita.parent_processing_job
WHERE child_id = %s"""
qdb.sql_connection.TRN.add(sql, [job.id])
# Remove as root job
sql = """DELETE FROM qiita.processing_job_workflow_root
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [job.id])
# Remove the input reference
sql = """DELETE FROM qiita.artifact_processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [job.id])
# Remove the job
sql = """DELETE FROM qiita.processing_job
WHERE processing_job_id = %s"""
qdb.sql_connection.TRN.add(sql, [job.id])
qdb.sql_connection.TRN.execute()
def submit(self):
"""Submits the workflow to execution
Raises
------
qiita_db.exceptions.QiitaDBOperationNotPermittedError
If the workflow is not in construction
"""
with qdb.sql_connection.TRN:
self._raise_if_not_in_construction()
g = self.graph
# In order to avoid potential race conditions, we are going to set
# all the children in 'waiting' status before submitting
# the root nodes
in_degrees = dict(g.in_degree())
roots = []
for job, degree in in_degrees.items():
if degree == 0:
roots.append(job)
else:
job._set_status('waiting')
for job in roots:
job.submit()
| bsd-3-clause |
yuanagain/seniorthesis | venv/lib/python2.7/site-packages/matplotlib/dviread.py | 4 | 33435 | """
An experimental module for reading dvi files output by TeX. Several
limitations make this not (currently) useful as a general-purpose dvi
preprocessor, but it is currently used by the pdf backend for
processing usetex text.
Interface::
dvi = Dvi(filename, 72)
# iterate over pages (but only one page is supported for now):
for page in dvi:
w, h, d = page.width, page.height, page.descent
for x,y,font,glyph,width in page.text:
fontname = font.texname
pointsize = font.size
...
for x,y,height,width in page.boxes:
...
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
import errno
import matplotlib
import matplotlib.cbook as mpl_cbook
from matplotlib.compat import subprocess
from matplotlib import rcParams
import numpy as np
import struct
import sys
import os
if six.PY3:
def ord(x):
return x
_dvistate = mpl_cbook.Bunch(pre=0, outer=1, inpage=2, post_post=3, finale=4)
class Dvi(object):
"""
A dvi ("device-independent") file, as produced by TeX.
The current implementation only reads the first page and does not
even attempt to verify the postamble.
"""
def __init__(self, filename, dpi):
"""
Initialize the object. This takes the filename as input and
opens the file; actually reading the file happens when
iterating through the pages of the file.
"""
matplotlib.verbose.report('Dvi: ' + filename, 'debug')
self.file = open(filename, 'rb')
self.dpi = dpi
self.fonts = {}
self.state = _dvistate.pre
self.baseline = self._get_baseline(filename)
def _get_baseline(self, filename):
if rcParams['text.latex.preview']:
base, ext = os.path.splitext(filename)
baseline_filename = base + ".baseline"
if os.path.exists(baseline_filename):
with open(baseline_filename, 'rb') as fd:
l = fd.read().split()
height, depth, width = l
return float(depth)
return None
def __iter__(self):
"""
Iterate through the pages of the file.
Returns (text, boxes) pairs, where:
text is a list of (x, y, fontnum, glyphnum, width) tuples
boxes is a list of (x, y, height, width) tuples
The coordinates are transformed into a standard Cartesian
coordinate system at the dpi value given when initializing.
The coordinates are floating point numbers, but otherwise
precision is not lost and coordinate values are not clipped to
integers.
"""
while True:
have_page = self._read()
if have_page:
yield self._output()
else:
break
def close(self):
"""
Close the underlying file if it is open.
"""
if not self.file.closed:
self.file.close()
def _output(self):
"""
Output the text and boxes belonging to the most recent page.
page = dvi._output()
"""
minx, miny, maxx, maxy = np.inf, np.inf, -np.inf, -np.inf
maxy_pure = -np.inf
for elt in self.text + self.boxes:
if len(elt) == 4: # box
x,y,h,w = elt
e = 0 # zero depth
else: # glyph
x,y,font,g,w = elt
h,e = font._height_depth_of(g)
minx = min(minx, x)
miny = min(miny, y - h)
maxx = max(maxx, x + w)
maxy = max(maxy, y + e)
maxy_pure = max(maxy_pure, y)
if self.dpi is None:
# special case for ease of debugging: output raw dvi coordinates
return mpl_cbook.Bunch(text=self.text, boxes=self.boxes,
width=maxx-minx, height=maxy_pure-miny,
                                   descent=maxy-maxy_pure)
d = self.dpi / (72.27 * 2**16) # from TeX's "scaled points" to dpi units
if self.baseline is None:
descent = (maxy - maxy_pure) * d
else:
descent = self.baseline
text = [ ((x-minx)*d, (maxy-y)*d - descent, f, g, w*d)
for (x,y,f,g,w) in self.text ]
boxes = [ ((x-minx)*d, (maxy-y)*d - descent, h*d, w*d) for (x,y,h,w) in self.boxes ]
return mpl_cbook.Bunch(text=text, boxes=boxes,
width=(maxx-minx)*d,
height=(maxy_pure-miny)*d,
descent=descent)
def _read(self):
"""
Read one page from the file. Return True if successful,
False if there were no more pages.
"""
while True:
byte = ord(self.file.read(1)[0])
self._dispatch(byte)
if byte == 140: # end of page
return True
if self.state == _dvistate.post_post: # end of file
self.close()
return False
def _arg(self, nbytes, signed=False):
"""
Read and return an integer argument *nbytes* long.
Signedness is determined by the *signed* keyword.
"""
str = self.file.read(nbytes)
value = ord(str[0])
if signed and value >= 0x80:
value = value - 0x100
for i in range(1, nbytes):
value = 0x100*value + ord(str[i])
return value
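    # Worked example (hypothetical bytes): _arg(2, signed=True) over
    # b'\xff\x00' first reads 0xff; since 0xff >= 0x80 it becomes -1, and the
    # second byte gives 0x100 * -1 + 0x00 == -256, i.e. big-endian two's
    # complement.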
def _dispatch(self, byte):
"""
Based on the opcode *byte*, read the correct kinds of
arguments from the dvi file and call the method implementing
that opcode with those arguments.
"""
if 0 <= byte <= 127: self._set_char(byte)
elif byte == 128: self._set_char(self._arg(1))
elif byte == 129: self._set_char(self._arg(2))
elif byte == 130: self._set_char(self._arg(3))
elif byte == 131: self._set_char(self._arg(4, True))
elif byte == 132: self._set_rule(self._arg(4, True), self._arg(4, True))
elif byte == 133: self._put_char(self._arg(1))
elif byte == 134: self._put_char(self._arg(2))
elif byte == 135: self._put_char(self._arg(3))
elif byte == 136: self._put_char(self._arg(4, True))
elif byte == 137: self._put_rule(self._arg(4, True), self._arg(4, True))
elif byte == 138: self._nop()
elif byte == 139: self._bop(*[self._arg(4, True) for i in range(11)])
elif byte == 140: self._eop()
elif byte == 141: self._push()
elif byte == 142: self._pop()
elif byte == 143: self._right(self._arg(1, True))
elif byte == 144: self._right(self._arg(2, True))
elif byte == 145: self._right(self._arg(3, True))
elif byte == 146: self._right(self._arg(4, True))
elif byte == 147: self._right_w(None)
elif byte == 148: self._right_w(self._arg(1, True))
elif byte == 149: self._right_w(self._arg(2, True))
elif byte == 150: self._right_w(self._arg(3, True))
elif byte == 151: self._right_w(self._arg(4, True))
elif byte == 152: self._right_x(None)
elif byte == 153: self._right_x(self._arg(1, True))
elif byte == 154: self._right_x(self._arg(2, True))
elif byte == 155: self._right_x(self._arg(3, True))
elif byte == 156: self._right_x(self._arg(4, True))
elif byte == 157: self._down(self._arg(1, True))
elif byte == 158: self._down(self._arg(2, True))
elif byte == 159: self._down(self._arg(3, True))
elif byte == 160: self._down(self._arg(4, True))
elif byte == 161: self._down_y(None)
elif byte == 162: self._down_y(self._arg(1, True))
elif byte == 163: self._down_y(self._arg(2, True))
elif byte == 164: self._down_y(self._arg(3, True))
elif byte == 165: self._down_y(self._arg(4, True))
elif byte == 166: self._down_z(None)
elif byte == 167: self._down_z(self._arg(1, True))
elif byte == 168: self._down_z(self._arg(2, True))
elif byte == 169: self._down_z(self._arg(3, True))
elif byte == 170: self._down_z(self._arg(4, True))
elif 171 <= byte <= 234: self._fnt_num(byte-171)
elif byte == 235: self._fnt_num(self._arg(1))
elif byte == 236: self._fnt_num(self._arg(2))
elif byte == 237: self._fnt_num(self._arg(3))
elif byte == 238: self._fnt_num(self._arg(4, True))
elif 239 <= byte <= 242:
len = self._arg(byte-238)
special = self.file.read(len)
self._xxx(special)
elif 243 <= byte <= 246:
k = self._arg(byte-242, byte==246)
c, s, d, a, l = [ self._arg(x) for x in (4, 4, 4, 1, 1) ]
n = self.file.read(a+l)
self._fnt_def(k, c, s, d, a, l, n)
elif byte == 247:
i, num, den, mag, k = [ self._arg(x) for x in (1, 4, 4, 4, 1) ]
x = self.file.read(k)
self._pre(i, num, den, mag, x)
elif byte == 248: self._post()
elif byte == 249: self._post_post()
else:
raise ValueError("unknown command: byte %d"%byte)
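    # A brief walk-through of the dispatch table above for a tiny page:
    # byte 139 (bop) reads eleven 4-byte arguments and opens the page,
    # bytes 0..127 typeset that character code and advance `h` by its width,
    # and byte 140 (eop) closes the page, which makes _read() return True.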
def _pre(self, i, num, den, mag, comment):
if self.state != _dvistate.pre:
raise ValueError("pre command in middle of dvi file")
if i != 2:
raise ValueError("Unknown dvi format %d"%i)
if num != 25400000 or den != 7227 * 2**16:
raise ValueError("nonstandard units in dvi file")
# meaning: TeX always uses those exact values, so it
# should be enough for us to support those
# (There are 72.27 pt to an inch so 7227 pt =
# 7227 * 2**16 sp to 100 in. The numerator is multiplied
# by 10^5 to get units of 10**-7 meters.)
if mag != 1000:
raise ValueError("nonstandard magnification in dvi file")
# meaning: LaTeX seems to frown on setting \mag, so
# I think we can assume this is constant
self.state = _dvistate.outer
def _set_char(self, char):
if self.state != _dvistate.inpage:
raise ValueError("misplaced set_char in dvi file")
self._put_char(char)
self.h += self.fonts[self.f]._width_of(char)
def _set_rule(self, a, b):
if self.state != _dvistate.inpage:
raise ValueError("misplaced set_rule in dvi file")
self._put_rule(a, b)
self.h += b
def _put_char(self, char):
if self.state != _dvistate.inpage:
raise ValueError("misplaced put_char in dvi file")
font = self.fonts[self.f]
if font._vf is None:
self.text.append((self.h, self.v, font, char,
font._width_of(char)))
else:
scale = font._scale
for x, y, f, g, w in font._vf[char].text:
newf = DviFont(scale=_mul2012(scale, f._scale),
tfm=f._tfm, texname=f.texname, vf=f._vf)
self.text.append((self.h + _mul2012(x, scale),
self.v + _mul2012(y, scale),
newf, g, newf._width_of(g)))
self.boxes.extend([(self.h + _mul2012(x, scale),
self.v + _mul2012(y, scale),
_mul2012(a, scale), _mul2012(b, scale))
for x, y, a, b in font._vf[char].boxes])
def _put_rule(self, a, b):
if self.state != _dvistate.inpage:
raise ValueError("misplaced put_rule in dvi file")
if a > 0 and b > 0:
self.boxes.append((self.h, self.v, a, b))
def _nop(self):
pass
def _bop(self, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, p):
if self.state != _dvistate.outer:
raise ValueError("misplaced bop in dvi file (state %d)" % self.state)
self.state = _dvistate.inpage
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack = []
self.text = [] # list of (x,y,fontnum,glyphnum)
self.boxes = [] # list of (x,y,width,height)
def _eop(self):
if self.state != _dvistate.inpage:
raise ValueError("misplaced eop in dvi file")
self.state = _dvistate.outer
del self.h, self.v, self.w, self.x, self.y, self.z, self.stack
def _push(self):
if self.state != _dvistate.inpage:
raise ValueError("misplaced push in dvi file")
self.stack.append((self.h, self.v, self.w, self.x, self.y, self.z))
def _pop(self):
if self.state != _dvistate.inpage:
raise ValueError("misplaced pop in dvi file")
self.h, self.v, self.w, self.x, self.y, self.z = self.stack.pop()
def _right(self, b):
if self.state != _dvistate.inpage:
raise ValueError("misplaced right in dvi file")
self.h += b
def _right_w(self, new_w):
if self.state != _dvistate.inpage:
raise ValueError("misplaced w in dvi file")
if new_w is not None:
self.w = new_w
self.h += self.w
def _right_x(self, new_x):
if self.state != _dvistate.inpage:
raise ValueError("misplaced x in dvi file")
if new_x is not None:
self.x = new_x
self.h += self.x
def _down(self, a):
if self.state != _dvistate.inpage:
raise ValueError("misplaced down in dvi file")
self.v += a
def _down_y(self, new_y):
if self.state != _dvistate.inpage:
raise ValueError("misplaced y in dvi file")
if new_y is not None:
self.y = new_y
self.v += self.y
def _down_z(self, new_z):
if self.state != _dvistate.inpage:
raise ValueError("misplaced z in dvi file")
if new_z is not None:
self.z = new_z
self.v += self.z
def _fnt_num(self, k):
if self.state != _dvistate.inpage:
raise ValueError("misplaced fnt_num in dvi file")
self.f = k
def _xxx(self, special):
if six.PY3:
matplotlib.verbose.report(
'Dvi._xxx: encountered special: %s'
% ''.join([(32 <= ord(ch) < 127) and chr(ch)
or '<%02x>' % ord(ch)
for ch in special]),
'debug')
else:
matplotlib.verbose.report(
'Dvi._xxx: encountered special: %s'
% ''.join([(32 <= ord(ch) < 127) and ch
or '<%02x>' % ord(ch)
for ch in special]),
'debug')
def _fnt_def(self, k, c, s, d, a, l, n):
fontname = n[-l:].decode('ascii')
tfm = _tfmfile(fontname)
if tfm is None:
if six.PY2:
error_class = OSError
else:
error_class = FileNotFoundError
raise error_class("missing font metrics file: %s" % fontname)
if c != 0 and tfm.checksum != 0 and c != tfm.checksum:
raise ValueError('tfm checksum mismatch: %s'%n)
vf = _vffile(fontname)
self.fonts[k] = DviFont(scale=s, tfm=tfm, texname=n, vf=vf)
def _post(self):
if self.state != _dvistate.outer:
raise ValueError("misplaced post in dvi file")
self.state = _dvistate.post_post
# TODO: actually read the postamble and finale?
# currently post_post just triggers closing the file
def _post_post(self):
raise NotImplementedError
class DviFont(object):
"""
Object that holds a font's texname and size, supports comparison,
and knows the widths of glyphs in the same units as the AFM file.
There are also internal attributes (for use by dviread.py) that
are *not* used for comparison.
The size is in Adobe points (converted from TeX points).
.. attribute:: texname
Name of the font as used internally by TeX and friends. This
is usually very different from any external font names, and
:class:`dviread.PsfontsMap` can be used to find the external
name of the font.
.. attribute:: size
Size of the font in Adobe points, converted from the slightly
smaller TeX points.
.. attribute:: widths
Widths of glyphs in glyph-space units, typically 1/1000ths of
the point size.
"""
__slots__ = ('texname', 'size', 'widths', '_scale', '_vf', '_tfm')
def __init__(self, scale, tfm, texname, vf):
if six.PY3 and isinstance(texname, bytes):
texname = texname.decode('ascii')
self._scale, self._tfm, self.texname, self._vf = \
scale, tfm, texname, vf
self.size = scale * (72.0 / (72.27 * 2**16))
try:
nchars = max(six.iterkeys(tfm.width)) + 1
except ValueError:
nchars = 0
self.widths = [ (1000*tfm.width.get(char, 0)) >> 20
for char in xrange(nchars) ]
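# Worked example (added for illustration, not in the original source): assuming
# the conventional 2**16 dvi units per TeX point, a fnt_def scale of 10 * 2**16
# (a 10 pt font) gives
#   size = 10 * 2**16 * 72.0 / (72.27 * 2**16) = 720 / 72.27 ~ 9.96 Adobe points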
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.texname == other.texname and self.size == other.size
def __ne__(self, other):
return not self.__eq__(other)
def _width_of(self, char):
"""
Width of char in dvi units. For internal use by dviread.py.
"""
width = self._tfm.width.get(char, None)
if width is not None:
return _mul2012(width, self._scale)
matplotlib.verbose.report(
'No width for char %d in font %s' % (char, self.texname),
'debug')
return 0
def _height_depth_of(self, char):
"""
Height and depth of char in dvi units. For internal use by dviread.py.
"""
result = []
for metric,name in ((self._tfm.height, "height"),
(self._tfm.depth, "depth")):
value = metric.get(char, None)
if value is None:
matplotlib.verbose.report(
'No %s for char %d in font %s' % (name, char, self.texname),
'debug')
result.append(0)
else:
result.append(_mul2012(value, self._scale))
return result
class Vf(Dvi):
"""
A virtual font (\*.vf file) containing subroutines for dvi files.
Usage::
vf = Vf(filename)
glyph = vf[code]
glyph.text, glyph.boxes, glyph.width
"""
def __init__(self, filename):
Dvi.__init__(self, filename, 0)
try:
self._first_font = None
self._chars = {}
self._packet_ends = None
self._read()
finally:
self.close()
def __getitem__(self, code):
return self._chars[code]
def _dispatch(self, byte):
# If we are in a packet, execute the dvi instructions
if self.state == _dvistate.inpage:
byte_at = self.file.tell()-1
if byte_at == self._packet_ends:
self._finalize_packet()
# fall through
elif byte_at > self._packet_ends:
raise ValueError("Packet length mismatch in vf file")
else:
if byte in (139, 140) or byte >= 243:
raise ValueError("Inappropriate opcode %d in vf file" % byte)
Dvi._dispatch(self, byte)
return
# We are outside a packet
if byte < 242: # a short packet (length given by byte)
cc, tfm = self._arg(1), self._arg(3)
self._init_packet(byte, cc, tfm)
elif byte == 242: # a long packet
pl, cc, tfm = [ self._arg(x) for x in (4, 4, 4) ]
self._init_packet(pl, cc, tfm)
elif 243 <= byte <= 246:
Dvi._dispatch(self, byte)
elif byte == 247: # preamble
i, k = self._arg(1), self._arg(1)
x = self.file.read(k)
cs, ds = self._arg(4), self._arg(4)
self._pre(i, x, cs, ds)
elif byte == 248: # postamble (just some number of 248s)
self.state = _dvistate.post_post
else:
raise ValueError("unknown vf opcode %d" % byte)
def _init_packet(self, pl, cc, tfm):
if self.state != _dvistate.outer:
raise ValueError("Misplaced packet in vf file")
self.state = _dvistate.inpage
self._packet_ends = self.file.tell() + pl
self._packet_char = cc
self._packet_width = tfm
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack, self.text, self.boxes = [], [], []
self.f = self._first_font
def _finalize_packet(self):
self._chars[self._packet_char] = mpl_cbook.Bunch(
text=self.text, boxes=self.boxes, width = self._packet_width)
self.state = _dvistate.outer
def _pre(self, i, x, cs, ds):
if self.state != _dvistate.pre:
raise ValueError("pre command in middle of vf file")
if i != 202:
raise ValueError("Unknown vf format %d" % i)
if len(x):
matplotlib.verbose.report('vf file comment: ' + x, 'debug')
self.state = _dvistate.outer
# cs = checksum, ds = design size
def _fnt_def(self, k, *args):
Dvi._fnt_def(self, k, *args)
if self._first_font is None:
self._first_font = k
def _fix2comp(num):
"""
Convert from two's complement to negative.
"""
assert 0 <= num < 2**32
if num & 2**31:
return num - 2**32
else:
return num
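# Worked examples (added for clarity, not in the original source): _fix2comp
# reinterprets a 32-bit unsigned value as a signed two's-complement value, e.g.
#   _fix2comp(0x7FFFFFFF) == 2**31 - 1
#   _fix2comp(0x80000000) == -2**31
#   _fix2comp(0xFFFFFFFF) == -1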
def _mul2012(num1, num2):
"""
Multiply two numbers in 20.12 fixed point format.
"""
# Separated into a function because >> has surprising precedence
return (num1*num2) >> 20
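# Worked examples (added for clarity, not in the original source): since the
# product is shifted right by 20 bits, a factor of 1 << 20 acts as 1.0, so
#   _mul2012(1 << 20, 12345) == 12345   # 1.0 * 12345
#   _mul2012(1 << 19, 1000) == 500      # 0.5 * 1000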
class Tfm(object):
"""
A TeX Font Metric file. This implementation covers only the bare
minimum needed by the Dvi class.
.. attribute:: checksum
Used for verifying against the dvi file.
.. attribute:: design_size
Design size of the font (in what units?)
.. attribute:: width
Width of each character, needs to be scaled by the factor
specified in the dvi file. This is a dict because indexing may
not start from 0.
.. attribute:: height
Height of each character.
.. attribute:: depth
Depth of each character.
"""
__slots__ = ('checksum', 'design_size', 'width', 'height', 'depth')
def __init__(self, filename):
matplotlib.verbose.report('opening tfm file ' + filename, 'debug')
with open(filename, 'rb') as file:
header1 = file.read(24)
lh, bc, ec, nw, nh, nd = \
struct.unpack(str('!6H'), header1[2:14])
matplotlib.verbose.report(
'lh=%d, bc=%d, ec=%d, nw=%d, nh=%d, nd=%d' % (
lh, bc, ec, nw, nh, nd), 'debug')
header2 = file.read(4*lh)
self.checksum, self.design_size = \
struct.unpack(str('!2I'), header2[:8])
# there is also encoding information etc.
char_info = file.read(4*(ec-bc+1))
widths = file.read(4*nw)
heights = file.read(4*nh)
depths = file.read(4*nd)
self.width, self.height, self.depth = {}, {}, {}
widths, heights, depths = \
[ struct.unpack(str('!%dI') % (len(x)/4), x)
for x in (widths, heights, depths) ]
for idx, char in enumerate(xrange(bc, ec+1)):
self.width[char] = _fix2comp(widths[ord(char_info[4*idx])])
self.height[char] = _fix2comp(heights[ord(char_info[4*idx+1]) >> 4])
self.depth[char] = _fix2comp(depths[ord(char_info[4*idx+1]) & 0xf])
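# Hedged usage sketch (added for illustration; the path and variable names are
# hypothetical): a Tfm maps character codes to fixed-point metrics, which
# DviFont then scales into dvi units, roughly
#   tfm = Tfm('/path/to/cmr10.tfm')
#   raw_width = tfm.width.get(ord('A'), 0)
#   dvi_width = _mul2012(raw_width, font_scale)   # font_scale from the fnt_def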
class PsfontsMap(object):
"""
A psfonts.map formatted file, mapping TeX fonts to PS fonts.
Usage::
>>> map = PsfontsMap(find_tex_file('pdftex.map'))
>>> entry = map['ptmbo8r']
>>> entry.texname
'ptmbo8r'
>>> entry.psname
'Times-Bold'
>>> entry.encoding
'/usr/local/texlive/2008/texmf-dist/fonts/enc/dvips/base/8r.enc'
>>> entry.effects
{'slant': 0.16700000000000001}
>>> entry.filename
For historical reasons, TeX knows many Type-1 fonts by different
names than the outside world. (For one thing, the names have to
fit in eight characters.) Also, TeX's native fonts are not Type-1
but Metafont, which is nontrivial to convert to PostScript except
as a bitmap. While high-quality conversions to Type-1 format exist
and are shipped with modern TeX distributions, we need to know
which Type-1 fonts are the counterparts of which native fonts. For
these reasons a mapping is needed from internal font names to font
file names.
A texmf tree typically includes mapping files called e.g.
psfonts.map, pdftex.map, dvipdfm.map. psfonts.map is used by
dvips, pdftex.map by pdfTeX, and dvipdfm.map by dvipdfm.
psfonts.map might avoid embedding the 35 PostScript fonts (i.e.,
have no filename for them, as in the Times-Bold example above),
while the pdf-related files perhaps only avoid the "Base 14" pdf
fonts. But the user may have configured these files differently.
"""
__slots__ = ('_font',)
def __init__(self, filename):
self._font = {}
with open(filename, 'rt') as file:
self._parse(file)
def __getitem__(self, texname):
try:
result = self._font[texname]
except KeyError:
result = self._font[texname.decode('ascii')]
fn, enc = result.filename, result.encoding
if fn is not None and not fn.startswith('/'):
result.filename = find_tex_file(fn)
if enc is not None and not enc.startswith('/'):
result.encoding = find_tex_file(result.encoding)
return result
def _parse(self, file):
"""Parse each line into words."""
for line in file:
line = line.strip()
if line == '' or line.startswith('%'):
continue
words, pos = [], 0
while pos < len(line):
if line[pos] == '"': # double quoted word
pos += 1
end = line.index('"', pos)
words.append(line[pos:end])
pos = end + 1
else: # ordinary word
end = line.find(' ', pos+1)
if end == -1: end = len(line)
words.append(line[pos:end])
pos = end
while pos < len(line) and line[pos] == ' ':
pos += 1
self._register(words)
def _register(self, words):
"""Register a font described by "words".
The format is, AFAIK: texname fontname [effects and filenames]
Effects are PostScript snippets like ".177 SlantFont",
filenames begin with one or two less-than signs. A filename
ending in enc is an encoding file, other filenames are font
files. This can be overridden with a left bracket: <[foobar
indicates an encoding file named foobar.
There is some difference between <foo.pfb and <<bar.pfb in
subsetting, but I have no example of << in my TeX installation.
"""
# If the map file specifies multiple encodings for a font, we
# follow pdfTeX in choosing the last one specified. Such
# entries are probably mistakes but they have occurred.
# http://tex.stackexchange.com/questions/10826/
# http://article.gmane.org/gmane.comp.tex.pdftex/4914
texname, psname = words[:2]
effects, encoding, filename = '', None, None
for word in words[2:]:
if not word.startswith('<'):
effects = word
else:
word = word.lstrip('<')
if word.startswith('[') or word.endswith('.enc'):
if encoding is not None:
matplotlib.verbose.report(
'Multiple encodings for %s = %s'
% (texname, psname), 'debug')
if word.startswith('['):
encoding = word[1:]
else:
encoding = word
else:
assert filename is None
filename = word
eff = effects.split()
effects = {}
try:
effects['slant'] = float(eff[eff.index('SlantFont')-1])
except ValueError:
pass
try:
effects['extend'] = float(eff[eff.index('ExtendFont')-1])
except ValueError:
pass
self._font[texname] = mpl_cbook.Bunch(
texname=texname, psname=psname, effects=effects,
encoding=encoding, filename=filename)
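# Illustrative sketch (the map line below is an assumed example, not taken from
# the original source): a line such as
#   ptmbo8r Times-Bold ".167 SlantFont" <8r.enc <ptmb8a.pfb
# would be registered roughly as
#   texname='ptmbo8r', psname='Times-Bold', effects={'slant': 0.167},
#   encoding='8r.enc', filename='ptmb8a.pfb'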
class Encoding(object):
"""
Parses a \*.enc file referenced from a psfonts.map style file.
The format this class understands is a very limited subset of
PostScript.
Usage (subject to change)::
for name in Encoding(filename):
whatever(name)
"""
__slots__ = ('encoding',)
def __init__(self, filename):
with open(filename, 'rt') as file:
matplotlib.verbose.report('Parsing TeX encoding ' + filename, 'debug-annoying')
self.encoding = self._parse(file)
matplotlib.verbose.report('Result: ' + repr(self.encoding), 'debug-annoying')
def __iter__(self):
for name in self.encoding:
yield name
def _parse(self, file):
result = []
state = 0
for line in file:
comment_start = line.find('%')
if comment_start > -1:
line = line[:comment_start]
line = line.strip()
if state == 0:
# Expecting something like /FooEncoding [
if '[' in line:
state = 1
line = line[line.index('[')+1:].strip()
if state == 1:
if ']' in line: # ] def
line = line[:line.index(']')]
state = 2
words = line.split()
for w in words:
if w.startswith('/'):
# Allow for /abc/def/ghi
subwords = w.split('/')
result.extend(subwords[1:])
else:
raise ValueError("Broken name in encoding file: " + w)
return result
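# Illustrative sketch (assumed file contents, not from the original source):
# an encoding file containing
#   /MyEncoding [
#   /grave /acute /circumflex
#   ] def
# would be parsed into ['grave', 'acute', 'circumflex'].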
def find_tex_file(filename, format=None):
"""
Call :program:`kpsewhich` to find a file in the texmf tree. If
*format* is not None, it is used as the value for the
`--format` option.
Apparently most existing TeX distributions on Unix-like systems
use kpathsea. I hear MikTeX (a popular distribution on Windows)
doesn't use kpathsea, so what do we do? (TODO)
.. seealso::
`Kpathsea documentation <http://www.tug.org/kpathsea/>`_
The library that :program:`kpsewhich` is part of.
"""
cmd = ['kpsewhich']
if format is not None:
cmd += ['--format=' + format]
cmd += [filename]
matplotlib.verbose.report('find_tex_file(%s): %s' \
% (filename,cmd), 'debug')
# stderr is unused, but reading it avoids a subprocess optimization
# that breaks EINTR handling in some Python versions:
# http://bugs.python.org/issue12493
# https://github.com/matplotlib/matplotlib/issues/633
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result = pipe.communicate()[0].rstrip()
matplotlib.verbose.report('find_tex_file result: %s' % result,
'debug')
return result.decode('ascii')
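# Hedged usage sketch (file names are illustrative, and the format string is an
# assumption): kpsewhich resolves names relative to the texmf tree, e.g.
#   find_tex_file('cmr10.tfm')                   # -> absolute path, or '' if not found
#   find_tex_file('8r.enc', format='enc files')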
# With multiple text objects per figure (e.g., tick labels) we may end
# up reading the same tfm and vf files many times, so we implement a
# simple cache. TODO: is this worth making persistent?
_tfmcache = {}
_vfcache = {}
def _fontfile(texname, class_, suffix, cache):
try:
return cache[texname]
except KeyError:
pass
filename = find_tex_file(texname + suffix)
if filename:
result = class_(filename)
else:
result = None
cache[texname] = result
return result
def _tfmfile(texname):
return _fontfile(texname, Tfm, '.tfm', _tfmcache)
def _vffile(texname):
return _fontfile(texname, Vf, '.vf', _vfcache)
if __name__ == '__main__':
import sys
matplotlib.verbose.set_level('debug-annoying')
fname = sys.argv[1]
try: dpi = float(sys.argv[2])
except IndexError: dpi = None
dvi = Dvi(fname, dpi)
fontmap = PsfontsMap(find_tex_file('pdftex.map'))
for page in dvi:
print('=== new page ===')
fPrev = None
for x,y,f,c,w in page.text:
if f != fPrev:
print('font', f.texname, 'scaled', f._scale/pow(2.0,20))
fPrev = f
print(x,y,c, 32 <= c < 128 and chr(c) or '.', w)
for x,y,w,h in page.boxes:
print(x,y,'BOX',w,h)
| mit |
bthirion/scikit-learn | examples/preprocessing/plot_robust_scaling.py | 85 | 2698 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)
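# Hedged sketch (not part of the original example): by default RobustScaler
# centers each feature on its median and scales by the interquartile range
# (75th minus 25th percentile), roughly equivalent to:
q25, q75 = np.percentile(X_train, [25, 75], axis=0)
Xtr_r_manual = (X_train - np.median(X_train, axis=0)) / (q75 - q25)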
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
wogsland/QSTK | Homework/hw2.py | 1 | 3248 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on January, 23, 2013
@author: Sourabh Bajaj
@contact: [email protected]
@summary: Event Profiler Tutorial
'''
import pandas as pd
import numpy as np
import math
import copy
import QSTK.qstkutil.qsdateutil as du
import datetime as dt
import QSTK.qstkutil.DataAccess as da
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkstudy.EventProfiler as ep
"""
Accepts a list of symbols along with start and end date
Returns the Event Matrix which is a pandas Datamatrix
Event matrix has the following structure :
|IBM |GOOG|XOM |MSFT| GS | JP |
(d1)|nan |nan | 1 |nan |nan | 1 |
(d2)|nan | 1 |nan |nan |nan |nan |
(d3)| 1 |nan | 1 |nan | 1 |nan |
(d4)|nan | 1 |nan | 1 |nan |nan |
...................................
...................................
Also, d1 = start date
nan = no information about any event.
1 = status bit (positively confirms the event occurrence)
"""
def find_events(ls_symbols, d_data):
''' Finding the event dataframe '''
df_close = d_data['actual_close']
ts_market = df_close['SPY']
print "Finding Events"
# Creating an empty dataframe
df_events = copy.deepcopy(df_close)
df_events = df_events * np.NAN
# Time stamps for the event range
ldt_timestamps = df_close.index
for s_sym in ls_symbols:
for i in range(1, len(ldt_timestamps)):
# Calculating the returns for this timestamp
f_symprice_today = df_close[s_sym].ix[ldt_timestamps[i]]
f_symprice_yest = df_close[s_sym].ix[ldt_timestamps[i - 1]]
f_marketprice_today = ts_market.ix[ldt_timestamps[i]]
f_marketprice_yest = ts_market.ix[ldt_timestamps[i - 1]]
f_symreturn_today = (f_symprice_today / f_symprice_yest) - 1
f_marketreturn_today = (f_marketprice_today / f_marketprice_yest) - 1
# Event is found if the symbol's actual close drops below $7:
# it closed at or above $7 yesterday and below $7 today
if f_symprice_yest >= 7 and f_symprice_today < 7:
df_events[s_sym].ix[ldt_timestamps[i]] = 1
return df_events
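# Hedged illustration (not part of the original assignment code): the event
# matrix returned above can be summarised by counting its non-NaN cells, e.g.
#   n_events = int(df_events.count().sum())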
if __name__ == '__main__':
dt_start = dt.datetime(2008, 1, 1)
dt_end = dt.datetime(2009, 12, 31)
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt.timedelta(hours=16))
dataobj = da.DataAccess('Yahoo')
ls_symbols = dataobj.get_symbols_from_list('sp5002012')
ls_symbols.append('SPY')
ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
ldf_data = dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
for s_key in ls_keys:
d_data[s_key] = d_data[s_key].fillna(method='ffill')
d_data[s_key] = d_data[s_key].fillna(method='bfill')
d_data[s_key] = d_data[s_key].fillna(1.0)
df_events = find_events(ls_symbols, d_data)
print "Creating Study"
ep.eventprofiler(df_events, d_data, i_lookback=20, i_lookforward=20,
s_filename='SP2012-7.pdf', b_market_neutral=True, b_errorbars=True,
s_market_sym='SPY')
| bsd-3-clause |
mhue/scikit-learn | sklearn/tests/test_multiclass.py | 72 | 24581 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
with ignore_warnings():
assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
LabelBinarizer(), [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
# Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
@ignore_warnings
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = [["spam", "eggs"], ["spam"], ["ham", "eggs", "spam"],
["ham", "eggs"], ["ham"]]
# y = [[1, 2], [1], [0, 1, 2], [0, 2], [0]]
Y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
# test input as lists of tuples
clf = assert_warns(DeprecationWarning,
OneVsRestClassifier(base_clf).fit,
X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_equal(set(y_pred), set(["spam", "eggs"]))
assert_true(clf.multilabel_)
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
# For each sample and each class, there are only 3 possible vote levels
# because there are only 3 distinct class pairs and thus 3 distinct
# binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
base_estimator = DecisionTreeClassifier(random_state=0)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
all_metas = [
(OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
(OneVsOneClassifier, fit_ovo, predict_ovo, None),
(OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
]
for MetaEst, fit_func, predict_func, proba_func in all_metas:
try:
meta_est = MetaEst(base_estimator,
random_state=0).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train,
random_state=0)
except TypeError:
meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train)
if len(fitted_return) == 2:
estimators_, classes_or_lb = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
X_test),
meta_est.predict(X_test))
if proba_func is not None:
assert_almost_equal(proba_func(estimators_, X_test,
is_multilabel=False),
meta_est.predict_proba(X_test))
else:
estimators_, classes_or_lb, codebook = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
codebook, X_test),
meta_est.predict(X_test))
| bsd-3-clause |
ChinaQuants/zipline | zipline/gens/tradesimulation.py | 9 | 15117 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib2 import ExitStack
from logbook import Logger, Processor
from pandas.tslib import normalize_date
from zipline.utils.api_support import ZiplineAPI
from zipline.finance.trading import NoFurtherDataError
from zipline.protocol import (
BarData,
SIDData,
DATASOURCE_TYPE
)
log = Logger('Trade Simulation')
class AlgorithmSimulator(object):
EMISSION_TO_PERF_KEY_MAP = {
'minute': 'minute_perf',
'daily': 'daily_perf'
}
def __init__(self, algo, sim_params):
# ==============
# Simulation
# Param Setup
# ==============
self.sim_params = sim_params
# ==============
# Algo Setup
# ==============
self.algo = algo
self.algo_start = normalize_date(self.sim_params.first_open)
self.env = algo.trading_environment
# ==============
# Snapshot Setup
# ==============
# The algorithm's data as of our most recent event.
# We want an object that will have empty objects as default
# values on missing keys.
self.current_data = BarData()
# We don't have a datetime for the current snapshot until we
# receive a message.
self.simulation_dt = None
# =============
# Logging Setup
# =============
# Processor function for injecting the algo_dt into
# user prints/logs.
def inject_algo_dt(record):
if 'algo_dt' not in record.extra:
record.extra['algo_dt'] = self.simulation_dt
self.processor = Processor(inject_algo_dt)
def transform(self, stream_in):
"""
Main generator work loop.
"""
# Initialize the mkt_close
mkt_open = self.algo.perf_tracker.market_open
mkt_close = self.algo.perf_tracker.market_close
# inject the current algo
# snapshot time to any log record generated.
with ExitStack() as stack:
stack.enter_context(self.processor)
stack.enter_context(ZiplineAPI(self.algo))
data_frequency = self.sim_params.data_frequency
self._call_before_trading_start(mkt_open)
for date, snapshot in stream_in:
self.simulation_dt = date
self.on_dt_changed(date)
# If we're still in the warmup period. Use the event to
# update our universe, but don't yield any perf messages,
# and don't send a snapshot to handle_data.
if date < self.algo_start:
for event in snapshot:
if event.type == DATASOURCE_TYPE.SPLIT:
self.algo.blotter.process_split(event)
elif event.type == DATASOURCE_TYPE.TRADE:
self.update_universe(event)
self.algo.perf_tracker.process_trade(event)
elif event.type == DATASOURCE_TYPE.CUSTOM:
self.update_universe(event)
else:
messages = self._process_snapshot(
date,
snapshot,
self.algo.instant_fill,
)
# Perf messages are only emitted if the snapshot contained
# a benchmark event.
for message in messages:
yield message
# When emitting minutely, we need to call
# before_trading_start before the next trading day begins
if date == mkt_close:
if mkt_close <= self.algo.perf_tracker.last_close:
before_last_close = \
mkt_close < self.algo.perf_tracker.last_close
try:
mkt_open, mkt_close = \
self.env.next_open_and_close(mkt_close)
except NoFurtherDataError:
# If at the end of backtest history,
# skip advancing market close.
pass
if before_last_close:
self._call_before_trading_start(mkt_open)
elif data_frequency == 'daily':
next_day = self.env.next_trading_day(date)
if next_day is not None and \
next_day < self.algo.perf_tracker.last_close:
self._call_before_trading_start(next_day)
self.algo.portfolio_needs_update = True
self.algo.account_needs_update = True
self.algo.performance_needs_update = True
risk_message = self.algo.perf_tracker.handle_simulation_end()
yield risk_message
def _process_snapshot(self, dt, snapshot, instant_fill):
"""
Process a stream of events corresponding to a single datetime, possibly
returning a perf message to be yielded.
If @instant_fill = True, we delay processing of events until after the
user's call to handle_data, and we process the user's placed orders
before the snapshot's events. Note that this introduces a lookahead
bias, since the user is effectively placing orders that are filled
based on trades that happened prior to the call to handle_data.
If @instant_fill = False, we process Trade events before calling
handle_data. This means that orders are filled based on trades
occurring in the next snapshot. This is the more conservative model,
and as such it is the default behavior in TradingAlgorithm.
"""
# Flags indicating whether we saw any events of type TRADE and type
# BENCHMARK. Respectively, these control whether or not handle_data is
# called for this snapshot and whether we emit a perf message for this
# snapshot.
any_trade_occurred = False
benchmark_event_occurred = False
if instant_fill:
events_to_be_processed = []
# Assign process events to variables to avoid attribute access in
# innermost loops.
#
# Done here, to allow for perf_tracker or blotter to be swapped out
# or changed in between snapshots.
perf_process_trade = self.algo.perf_tracker.process_trade
perf_process_transaction = self.algo.perf_tracker.process_transaction
perf_process_order = self.algo.perf_tracker.process_order
perf_process_benchmark = self.algo.perf_tracker.process_benchmark
perf_process_split = self.algo.perf_tracker.process_split
perf_process_dividend = self.algo.perf_tracker.process_dividend
perf_process_commission = self.algo.perf_tracker.process_commission
perf_process_close_position = \
self.algo.perf_tracker.process_close_position
blotter_process_trade = self.algo.blotter.process_trade
blotter_process_benchmark = self.algo.blotter.process_benchmark
# Containers for the snapshotted events, so that the events are
# processed in a predictable order, without relying on the sorted order
# of the individual sources.
# There is only one benchmark per snapshot, will be set to the current
# benchmark iff it occurs.
benchmark = None
# trades and customs are initialized as a list since process_snapshot
# is most often called on market bars, which could contain trades or
# custom events.
trades = []
customs = []
closes = []
# splits and dividends are processed once a day.
#
# The avoidance of creating the list every time this is called is more
# to attempt to show that this is the infrequent case of the method,
# since the performance benefit from deferring the list allocation is
# marginal. splits list will be allocated when a split occurs in the
# snapshot.
splits = None
# dividends list will be allocated when a dividend occurs in the
# snapshot.
dividends = None
for event in snapshot:
if event.type == DATASOURCE_TYPE.TRADE:
trades.append(event)
elif event.type == DATASOURCE_TYPE.BENCHMARK:
benchmark = event
elif event.type == DATASOURCE_TYPE.SPLIT:
if splits is None:
splits = []
splits.append(event)
elif event.type == DATASOURCE_TYPE.CUSTOM:
customs.append(event)
elif event.type == DATASOURCE_TYPE.DIVIDEND:
if dividends is None:
dividends = []
dividends.append(event)
elif event.type == DATASOURCE_TYPE.CLOSE_POSITION:
closes.append(event)
else:
raise ValueError("Unrecognized event={0}".format(event))
# Handle benchmark first.
#
# Internal broker implementation depends on the benchmark being
# processed first so that transactions and commissions reported from
# the broker can be injected.
if benchmark is not None:
benchmark_event_occurred = True
perf_process_benchmark(benchmark)
for txn, order in blotter_process_benchmark(benchmark):
if txn.type == DATASOURCE_TYPE.TRANSACTION:
perf_process_transaction(txn)
elif txn.type == DATASOURCE_TYPE.COMMISSION:
perf_process_commission(txn)
perf_process_order(order)
for trade in trades:
self.update_universe(trade)
any_trade_occurred = True
if instant_fill:
events_to_be_processed.append(trade)
else:
for txn, order in blotter_process_trade(trade):
if txn.type == DATASOURCE_TYPE.TRANSACTION:
perf_process_transaction(txn)
elif txn.type == DATASOURCE_TYPE.COMMISSION:
perf_process_commission(txn)
perf_process_order(order)
perf_process_trade(trade)
for custom in customs:
self.update_universe(custom)
for close in closes:
self.update_universe(close)
perf_process_close_position(close)
if splits is not None:
for split in splits:
# process_split is not assigned to a variable since it is
# called rarely compared to the other event processors.
self.algo.blotter.process_split(split)
perf_process_split(split)
if dividends is not None:
for dividend in dividends:
perf_process_dividend(dividend)
if any_trade_occurred:
new_orders = self._call_handle_data()
for order in new_orders:
perf_process_order(order)
if instant_fill:
# Now that handle_data has been called and orders have been placed,
# process the event stream to fill user orders based on the events
# from this snapshot.
for trade in events_to_be_processed:
for txn, order in blotter_process_trade(trade):
if txn is not None:
perf_process_transaction(txn)
if order is not None:
perf_process_order(order)
perf_process_trade(trade)
if benchmark_event_occurred:
return self.generate_messages(dt)
else:
return ()
def _call_handle_data(self):
"""
Call the user's handle_data, returning any orders placed by the algo
during the call.
"""
self.algo.event_manager.handle_data(
self.algo,
self.current_data,
self.simulation_dt,
)
orders = self.algo.blotter.new_orders
self.algo.blotter.new_orders = []
return orders
def _call_before_trading_start(self, dt):
dt = normalize_date(dt)
self.simulation_dt = dt
self.on_dt_changed(dt)
self.algo.before_trading_start(self.current_data)
def on_dt_changed(self, dt):
if self.algo.datetime != dt:
self.algo.on_dt_changed(dt)
def generate_messages(self, dt):
"""
Generator that yields perf messages for the given datetime.
"""
# Ensure that updated_portfolio has been called at least once for this
# dt before we emit a perf message. This is a no-op if
# updated_portfolio has already been called this dt.
self.algo.updated_portfolio()
self.algo.updated_account()
rvars = self.algo.recorded_vars
if self.algo.perf_tracker.emission_rate == 'daily':
perf_message = \
self.algo.perf_tracker.handle_market_close_daily()
perf_message['daily_perf']['recorded_vars'] = rvars
yield perf_message
elif self.algo.perf_tracker.emission_rate == 'minute':
# close the minute in the tracker, and collect the daily message if
# the minute is the close of the trading day
minute_message, daily_message = \
self.algo.perf_tracker.handle_minute_close(dt)
# collect and yield the minute's perf message
minute_message['minute_perf']['recorded_vars'] = rvars
yield minute_message
# if there was a daily perf message, collect and yield it
if daily_message:
daily_message['daily_perf']['recorded_vars'] = rvars
yield daily_message
def update_universe(self, event):
"""
Update the universe with new event information.
"""
# Update our knowledge of this event's sid
# rather than use if event.sid in ..., just trying
# and handling the exception is significantly faster
try:
sid_data = self.current_data[event.sid]
except KeyError:
sid_data = self.current_data[event.sid] = SIDData(event.sid)
sid_data.__dict__.update(event.__dict__)
| apache-2.0 |
manashmndl/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
jschuecker/nest-simulator | topology/pynest/hl_api.py | 8 | 71159 | # -*- coding: utf-8 -*-
#
# hl_api.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
**High-level API of PyNEST Topology Module**
This file defines the user-level functions of NEST's Python interface to the
Topology module. The basic approach is the same as for the PyNEST interface to
NEST:
1. Function names are the same as in SLI.
2. Nodes are identified by their GIDs.
3. GIDs are always given as tuples or lists of integer(s).
4. Commands returning GIDs return them as tuples.
5. Other arguments can be
* single items that are applied to all entries in a GID list
* a list of the same length as the given list of GID(s) where each item is
matched with the pertaining GID.
**Example**
::
layers = CreateLayer(({...}, {...}, {...}))
creates three layers and returns a tuple of three GIDs.
::
ConnectLayers(layers[:2], layers[1:], {...})
connects `layers[0]` to `layers[1]` and `layers[1]` to `layers[2]` \
using the same dictionary to specify both connections.
::
ConnectLayers(layers[:2], layers[1:], ({...}, {...}))
connects the same layers, but the `layers[0]` to `layers[1]` connection
is specified by the first dictionary, the `layers[1]` to `layers[2]`
connection by the second.
:Authors:
Kittel Austvoll,
Hans Ekkehard Plesser,
Hakon Enger
"""
import nest
import nest.lib.hl_api_helper as hlh
def topology_func(slifunc, *args):
"""
Execute SLI function `slifunc` with arguments `args` in Topology namespace.
Parameters
----------
slifunc : str
SLI namespace expression
Other parameters
----------------
args : dict
An arbitrary number of arguments
Returns
-------
out :
Values from SLI function `slifunc`
See also
--------
nest.sli_func
"""
return nest.sli_func(slifunc, *args)
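# Hedged illustration (not from the original source): topology_func simply
# forwards to nest.sli_func within the Topology namespace; for example,
# CreateMask below ends up calling
#   topology_func('CreateMask', {'circular': {'radius': 0.2}})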
class Mask(object):
"""
Class for spatial masks.
Masks are used when creating connections in the Topology module. A mask
describes which area of the pool layer shall be searched for nodes to
connect for any given node in the driver layer. Masks are created using
the ``CreateMask`` command.
"""
_datum = None
# The constructor should not be called by the user
def __init__(self, datum):
"""Masks must be created using the CreateMask command."""
if not isinstance(datum, nest.SLIDatum) or datum.dtype != "masktype":
raise TypeError("expected mask Datum")
self._datum = datum
# Generic binary operation
def _binop(self, op, other):
if not isinstance(other, Mask):
return NotImplemented
return Mask(topology_func(op, self._datum, other._datum))
def __or__(self, other):
return self._binop("or", other)
def __and__(self, other):
return self._binop("and", other)
def __sub__(self, other):
return self._binop("sub", other)
def Inside(self, point):
"""
Test if a point is inside a mask.
Parameters
----------
point : tuple/list of float values
Coordinate of point
Returns
-------
out : bool
True if the point is inside the mask, False otherwise
"""
return topology_func("Inside", point, self._datum)
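# Hedged usage sketch (not from the original docs): Mask objects returned by
# CreateMask can be combined with the Boolean operators defined above, e.g.
# from user code (import nest.topology as tp):
#   big = tp.CreateMask('circular', {'radius': 0.5})
#   small = tp.CreateMask('circular', {'radius': 0.2})
#   ring = big - small       # difference, similar to a 'doughnut' mask
#   union = big | small      # intersection: big & small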
def CreateMask(masktype, specs, anchor=None):
"""
Create a spatial mask for connections.
Masks are used when creating connections in the Topology module. A mask
describes the area of the pool layer that is searched for nodes to
connect for any given node in the driver layer. Several mask types
are available. Examples are the grid region, the rectangular, circular or
doughnut region.
The command ``CreateMask`` creates a Mask object which may be combined
with other ``Mask`` objects using Boolean operators. The mask is specified
in a dictionary.
``Mask`` objects can be passed to ``ConnectLayers`` in a
connection dictionary with the key `'mask'`.
Parameters
----------
masktype : str, ['rectangular' | 'circular' | 'doughnut' | 'elliptical']
        for 2D masks, \ ['box' | 'spherical' | 'ellipsoidal'] for 3D masks,
['grid'] only for grid-based layers in 2D
The mask name corresponds to the geometrical shape of the mask. There
are different types for 2- and 3-dimensional layers.
specs : dict
Dictionary specifying the parameters of the provided `masktype`,
see **Notes**.
anchor : [tuple/list of floats | dict with the keys `'column'` and \
`'row'` (for grid masks only)], optional, default: None
By providing anchor coordinates, the location of the mask relative to
the driver node can be changed. The list of coordinates has a length
of 2 or 3 dependent on the number of dimensions.
Returns
-------
out : ``Mask`` object
See also
--------
ConnectLayers: Connect two (lists of) layers pairwise according to
specified projections. ``Mask`` objects can be passed in a connection
dictionary with the key `'mask'`.
Notes
-----
-
**Mask types**
Available mask types (`masktype`) and their corresponding parameter
dictionaries:
* 2D free and grid-based layers
::
'rectangular' :
{'lower_left' : [float, float],
'upper_right': [float, float]}
#or
'circular' :
{'radius' : float}
#or
'doughnut' :
{'inner_radius' : float,
'outer_radius' : float}
#or
'elliptical' :
{'major_axis' : float,
'minor_axis' : float,
'azimuth_angle' : float, # default: 0.0,
'anchor' : [float, float], # default: [0.0, 0.0]}
* 3D free and grid-based layers
::
'box' :
{'lower_left' : [float, float, float],
'upper_right' : [float, float, float]}
#or
'spherical' :
{'radius' : float}
#or
'ellipsoidal' :
{'major_axis' : float,
'minor_axis' : float,
                 'polar_axis' : float,
'azimuth_angle' : float, # default: 0.0,
'polar_angle' : float, # default: 0.0,
                 'anchor' : [float, float, float], # default: [0.0, 0.0, 0.0]}
* 2D grid-based layers only
::
'grid' :
{'rows' : float,
'columns' : float}
By default the top-left corner of a grid mask, i.e., the grid
mask element with grid index [0, 0], is aligned with the driver
node. It can be changed by means of the 'anchor' parameter:
::
'anchor' :
{'row' : float,
'column' : float}
**Example**
::
import nest.topology as tp
# create a grid-based layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_psc_alpha'})
# create a circular mask
m = tp.CreateMask('circular', {'radius': 0.2})
# connectivity specifications
conndict = {'connection_type': 'divergent',
'mask' : m}
# connect layer l with itself according to the specifications
tp.ConnectLayers(l, l, conndict)
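        Since ``CreateMask`` returns a ``Mask`` object, masks can also be
        combined with the Boolean operators ``|``, ``&`` and ``-`` before they
        are used. A brief sketch building on the example above (the concrete
        shapes are illustrative assumptions):
        ::
            # ring-shaped region: large circular mask minus a smaller one
            outer = tp.CreateMask('circular', {'radius': 0.4})
            inner = tp.CreateMask('circular', {'radius': 0.1})
            ring = outer - inner
            conndict_ring = {'connection_type': 'divergent',
                             'mask' : ring}
            tp.ConnectLayers(l, l, conndict_ring)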
"""
if anchor is None:
return Mask(topology_func('CreateMask', {masktype: specs}))
else:
return Mask(
topology_func('CreateMask', {masktype: specs, 'anchor': anchor}))
class Parameter(object):
"""
Class for parameters for distance dependency or randomization.
Parameters are spatial functions which are used when creating
connections in the Topology module. A parameter may be used as a
probability kernel when creating connections or as synaptic parameters
(such as weight and delay). Parameters are created using the
``CreateParameter`` command.
"""
_datum = None
# The constructor should not be called by the user
def __init__(self, datum):
"""Parameters must be created using the CreateParameter command."""
if not isinstance(datum,
nest.SLIDatum) or datum.dtype != "parametertype":
raise TypeError("expected parameter datum")
self._datum = datum
# Generic binary operation
def _binop(self, op, other):
if not isinstance(other, Parameter):
return NotImplemented
return Parameter(topology_func(op, self._datum, other._datum))
def __add__(self, other):
return self._binop("add", other)
def __sub__(self, other):
return self._binop("sub", other)
def __mul__(self, other):
return self._binop("mul", other)
def __div__(self, other):
return self._binop("div", other)
def __truediv__(self, other):
return self._binop("div", other)
def GetValue(self, point):
"""
Compute value of parameter at a point.
Parameters
----------
point : tuple/list of float values
coordinate of point
Returns
-------
out : value
The value of the parameter at the point
See also
--------
CreateParameter : create parameter for e.g., distance dependency
Notes
-----
-
**Example**
::
import nest.topology as tp
#linear dependent parameter
P = tp.CreateParameter('linear', {'a' : 2., 'c' : 0.})
#get out value
P.GetValue(point=[3., 4.])
"""
return topology_func("GetValue", point, self._datum)
def CreateParameter(parametertype, specs):
"""
Create a parameter for distance dependency or randomization.
Parameters are (spatial) functions which are used when creating
connections in the Topology module for distance dependency or
randomization. This command creates a Parameter object which may be
combined with other ``Parameter`` objects using arithmetic operators.
The parameter is specified in a dictionary.
A parameter may be used as a probability kernel when creating connections
or as synaptic parameters (such as weight and delay), i.e., for specifying
the parameters `'kernel'`, `'weights'` and `'delays'` in the
connection dictionary passed to ``ConnectLayers``.
Parameters
----------
parametertype : {'constant', 'linear', 'exponential', 'gaussian', \
'gaussian2D', 'uniform', 'normal', 'lognormal'}
Function types with or without distance dependency
specs : dict
Dictionary specifying the parameters of the provided
`'parametertype'`, see **Notes**.
Returns
-------
out : ``Parameter`` object
See also
--------
ConnectLayers : Connect two (lists of) layers pairwise according to
specified projections. Parameters can be used to specify the
parameters `'kernel'`, `'weights'` and `'delays'` in the
connection dictionary.
Parameters : Class for parameters for distance dependency or randomization.
Notes
-----
-
**Parameter types**
Available parameter types (`parametertype` parameter), their function and
acceptable keys for their corresponding specification dictionaries
* Constant
::
'constant' :
{'value' : float} # constant value
* With dependence on the distance `d`
::
# p(d) = c + a * d
'linear' :
{'a' : float, # slope, default: 1.0
'c' : float} # constant offset, default: 0.0
# or
# p(d) = c + a*exp(-d/tau)
'exponential' :
{'a' : float, # coefficient of exponential term, default: 1.0
'c' : float, # constant offset, default: 0.0
'tau' : float} # length scale factor, default: 1.0
# or
# p(d) = c + p_center*exp(-(d-mean)^2/(2*sigma^2))
'gaussian' :
{'p_center' : float, # value at center, default: 1.0
'mean' : float, # distance to center, default: 0.0
'sigma' : float, # width of Gaussian, default: 1.0
'c' : float} # constant offset, default: 0.0
* Bivariate Gaussian parameter:
::
# p(x,y) = c + p_center *
# exp( -( (x-mean_x)^2/sigma_x^2 + (y-mean_y)^2/sigma_y^2
# + 2*rho*(x-mean_x)*(y-mean_y)/(sigma_x*sigma_y) ) /
# (2*(1-rho^2)) )
'gaussian2D' :
{'p_center' : float, # value at center, default: 1.0
'mean_x' : float, # x-coordinate of center, default: 0.0
'mean_y' : float, # y-coordinate of center, default: 0.0
'sigma_x' : float, # width in x-direction, default: 1.0
'sigma_y' : float, # width in y-direction, default: 1.0
'rho' : float, # correlation of x and y, default: 0.0
'c' : float} # constant offset, default: 0.0
* Without distance dependency, for randomization
::
# random parameter with uniform distribution in [min,max)
'uniform' :
{'min' : float, # minimum value, default: 0.0
'max' : float} # maximum value, default: 1.0
# or
# random parameter with normal distribution, optionally truncated
# to [min,max)
'normal':
{'mean' : float, # mean value, default: 0.0
'sigma': float, # standard deviation, default: 1.0
'min' : float, # minimum value, default: -inf
'max' : float} # maximum value, default: +inf
# or
# random parameter with lognormal distribution,
# optionally truncated to [min,max)
'lognormal' :
{'mu' : float, # mean value of logarithm, default: 0.0
'sigma': float, # standard deviation of log, default: 1.0
'min' : float, # minimum value, default: -inf
'max' : float} # maximum value, default: +inf
**Example**
::
import nest.topology as tp
# create a grid-based layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_psc_alpha'})
# parameter for delay with linear distance dependency
d = tp.CreateParameter('linear', {'a': 0.2,
'c': 0.2})
# connectivity specifications
conndict = {'connection_type': 'divergent',
'delays': d}
tp.ConnectLayers(l, l, conndict)
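    ``Parameter`` objects can also be combined with the arithmetic operators
    ``+``, ``-``, ``*`` and ``/`` before they are used in a connection
    dictionary. A brief sketch building on the example above (the particular
    combination is an illustrative assumption):
    ::
        # distance-dependent kernel scaled by a constant factor
        gauss = tp.CreateParameter('gaussian', {'p_center': 1.0, 'sigma': 1.0})
        scale = tp.CreateParameter('constant', {'value': 0.5})
        conndict2 = {'connection_type': 'divergent',
                     'kernel': gauss * scale}
        tp.ConnectLayers(l, l, conndict2)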
"""
return Parameter(topology_func('CreateParameter', {parametertype: specs}))
def CreateLayer(specs):
"""
    Create one or more Topology layer(s) according to given specifications.
The Topology module organizes neuronal networks in layers. A layer is a
special type of subnet which contains information about the spatial
position of its nodes (simple or composite elements) in 2 or 3 dimensions.
If `specs` is a dictionary, a single layer is created. If it is a list
of dictionaries, one layer is created for each dictionary.
Topology distinguishes between two classes of layers:
* grid-based layers in which each element is placed at a location in a
regular grid
* free layers in which elements can be placed arbitrarily
Obligatory dictionary entries define the class of layer
(grid-based layers: 'columns' and 'rows'; free layers: 'positions')
and the 'elements'.
Parameters
----------
specs : (tuple/list of) dict(s)
Dictionary or list of dictionaries with layer specifications, see
**Notes**.
Returns
-------
out : tuple of int(s)
GID(s) of created layer(s)
See also
--------
ConnectLayers: Connect two (lists of) layers which were created with
``CreateLayer`` pairwise according to specified projections.
Other parameters
----------------
Available parameters for the layer-specifying dictionary `specs`
center : tuple/list of floats, optional, default: (0.0, 0.0)
Layers are centered about the origin by default, but the center
coordinates can also be changed.
'center' has length 2 or 3 dependent on the number of dimensions.
columns : int, obligatory for grid-based layers
Number of columns.
Needs `'rows'`; mutually exclusive with `'positions'`.
edge_wrap : bool, default: False
Periodic boundary conditions.
elements : (tuple/list of) str or str followed by int
Elements of layers are NEST network nodes such as neuron models or
devices.
For network elements with several nodes of the same type, the
number of nodes to be created must follow the model name.
For composite elements, a collection of nodes can be passed as
list or tuple.
extent : tuple of floats, optional, default in 2D: (1.0, 1.0)
Size of the layer. It has length 2 or 3 dependent on the number of
dimensions.
positions : tuple/list of coordinates (lists/tuples of floats),
obligatory for free layers
Explicit specification of the positions of all elements.
The coordinates have a length 2 or 3 dependent on the number of
dimensions.
All element positions must be within the layer's extent.
Mutually exclusive with 'rows' and 'columns'.
rows : int, obligatory for grid-based layers
Number of rows.
Needs `'columns'`; mutually exclusive with `'positions'`.
Notes
-----
-
**Example**
::
import nest
import nest.topology as tp
# grid-based layer
gl = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_psc_alpha'})
# free layer
import numpy as np
pos = [[np.random.uniform(-0.5, 0.5), np.random.uniform(-0.5,0.5)]
for i in range(50)]
fl = tp.CreateLayer({'positions' : pos,
'elements' : 'iaf_psc_alpha'})
# extent, center and edge_wrap
el = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'extent' : [2.0, 3.0],
'center' : [1.0, 1.5],
'edge_wrap' : True,
'elements' : 'iaf_psc_alpha'})
# composite layer with several nodes of the same type
cl = tp.CreateLayer({'rows' : 1,
'columns' : 2,
'elements' : ['iaf_cond_alpha', 10,
'poisson_generator',
'noise_generator', 2]})
# investigate the status dictionary of a layer
nest.GetStatus(gl)[0]['topology']
"""
if isinstance(specs, dict):
specs = (specs, )
elif not all(isinstance(spec, dict) for spec in specs):
raise TypeError("specs must be a dictionary or a list of dictionaries")
for dicts in specs:
elements = dicts['elements']
if isinstance(elements, list):
for elem in elements:
hlh.model_deprecation_warning(elem)
else:
hlh.model_deprecation_warning(elements)
return topology_func('{ CreateLayer } Map', specs)
def ConnectLayers(pre, post, projections):
"""
    Connect pre- and postsynaptic (lists of) layers pairwise.
`pre` and `post` must be a tuple/list of GIDs of equal length. The GIDs
    must refer to layers created with ``CreateLayer``. Layers in the `pre`
and `post` lists are connected pairwise.
* If `projections` is a single dictionary, it applies to all pre-post
pairs.
* If `projections` is a tuple/list of dictionaries, it must have the same
length as `pre` and `post` and each dictionary is matched with the proper
pre-post pair.
A minimal call of ``ConnectLayers`` expects a source layer `pre`, a
target layer `post` and a connection dictionary `projections`
containing at least the entry `'connection_type'` (either
`'convergent'` or `'divergent'`).
When connecting two layers, the driver layer is the one in which each node
is considered in turn. The pool layer is the one from which nodes are
chosen for each node in the driver layer.
Parameters
----------
pre : tuple/list of int(s)
List of GIDs of presynaptic layers (sources)
post : tuple/list of int(s)
List of GIDs of postsynaptic layers (targets)
projections : (tuple/list of) dict(s)
Dictionary or list of dictionaries specifying projection properties
Returns
-------
out : None
ConnectLayers returns `None`
See also
--------
CreateLayer : Create one or more Topology layer(s).
CreateMask : Create a ``Mask`` object. Documentation on available spatial
masks. Masks can be used to specify the key `'mask'` of the
connection dictionary.
CreateParameter : Create a ``Parameter`` object. Documentation on available
parameters for distance dependency and randomization. Parameters can
be used to specify the parameters `'kernel'`, `'weights'` and
`'delays'` of the connection dictionary.
nest.GetConnections : Retrieve connections.
Other parameters
----------------
Available keys for the layer-specifying dictionary `projections`
allow_autapses : bool, optional, default: True
An autapse is a synapse (connection) from a node onto itself.
It is used together with the `'number_of_connections'` option.
allow_multapses : bool, optional, default: True
        Node A is connected to node B by a multapse if there are two or more
        synapses (connections) from A to B.
It is used together with the `'number_of_connections'` option.
connection_type : str
The type of connections can be either `'convergent'` or
`'divergent'`. In case of convergent connections, the target
layer is considered as driver layer and the source layer as pool
layer - and vice versa for divergent connections.
delays : [float | dict | Parameter object], optional, default: 1.0
Delays can be constant, randomized or distance-dependent according
to a provided function.
Information on available functions can be found in the
documentation on the function ``CreateParameter``.
kernel : [float | dict | Parameter object], optional, default: 1.0
A kernel is a function mapping the distance (or displacement)
between a driver and a pool node to a connection probability. The
default kernel is 1.0, i.e., connections are created with
certainty.
Information on available functions can be found in the
documentation on the function ``CreateParameter``.
mask : [dict | Mask object], optional
The mask defines which pool nodes are considered as potential
targets for each driver node. Parameters of the different
available masks in 2 and 3 dimensions are also defined in
dictionaries.
If no mask is specified, all neurons from the pool layer are
possible targets for each driver node.
Information on available masks can be found in the documentation on
the function ``CreateMask``.
number_of_connections : int, optional
Prescribed number of connections for each driver node. The actual
connections being created are picked at random from all the
candidate connections.
synapse_model : str, optional
The default synapse model in NEST is used if not specified
otherwise.
weights : [float | dict | Parameter object], optional, default: 1.0
Weights can be constant, randomized or distance-dependent according
to a provided function.
Information on available functions can be found in the
documentation on the function ``CreateParameter``.
Notes
-----
* In the case of free probabilistic connections (in contrast to
prescribing the number of connections), each possible driver-pool
pair is inspected exactly once so that there will be at most one
connection between each driver-pool pair.
* Periodic boundary conditions are always applied in the pool layer.
It is irrelevant whether the driver layer has periodic boundary
conditions or not.
* By default, Topology does not accept masks that are wider than the
pool layer when using periodic boundary conditions.
Kernel, weight and delay functions always consider the shortest
distance (displacement) between driver and pool node.
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 11,
'columns' : 11,
'extent' : [11.0, 11.0],
'elements' : 'iaf_psc_alpha'})
# connectivity specifications with a mask
conndict1 = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left' : [-2.0, -1.0],
'upper_right' : [2.0, 1.0]}}}
# connect layer l with itself according to the given
# specifications
tp.ConnectLayers(l, l, conndict1)
# connection dictionary with distance-dependent kernel
# (given as Parameter object) and randomized weights
# (given as a dictionary)
gauss_kernel = tp.CreateParameter('gaussian', {'p_center' : 1.0,
'sigma' : 1.0})
conndict2 = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 2.0}},
'kernel': gauss_kernel,
'weights': {'uniform': {'min': 0.2, 'max': 0.8}}}
"""
if not nest.is_sequence_of_gids(pre):
raise TypeError("pre must be a sequence of GIDs")
    if not nest.is_sequence_of_gids(post):
raise TypeError("post must be a sequence of GIDs")
if not len(pre) == len(post):
raise nest.NESTError("pre and post must have the same length.")
# ensure projections is list of full length
projections = nest.broadcast(projections, len(pre), (dict, ),
"projections")
# Replace python classes with SLI datums
def fixdict(d):
d = d.copy()
for k, v in d.items():
if isinstance(v, dict):
d[k] = fixdict(v)
elif isinstance(v, Mask) or isinstance(v, Parameter):
d[k] = v._datum
return d
projections = [fixdict(p) for p in projections]
topology_func('3 arraystore { ConnectLayers } ScanThread', pre, post,
projections)
def GetPosition(nodes):
"""
Return the spatial locations of nodes.
Parameters
----------
nodes : tuple/list of int(s)
List of GIDs
Returns
-------
out : tuple of tuple(s)
List of positions as 2- or 3-element lists
See also
--------
Displacement : Get vector of lateral displacement between nodes.
Distance : Get lateral distance between nodes.
DumpLayerConnections : Write connectivity information to file.
DumpLayerNodes : Write layer node positions to file.
Notes
-----
    * The functions ``GetPosition``, ``Displacement`` and ``Distance`` only
      work for nodes local to the current MPI process when used in an
      MPI-parallel simulation.
**Example**
::
import nest
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_psc_alpha'})
# retrieve positions of all (local) nodes belonging to the layer
gids = nest.GetNodes(l, {'local_only': True})[0]
tp.GetPosition(gids)
"""
if not nest.is_sequence_of_gids(nodes):
raise TypeError("nodes must be a sequence of GIDs")
return topology_func('{ GetPosition } Map', nodes)
def GetLayer(nodes):
"""
Return the layer to which nodes belong.
Parameters
----------
nodes : tuple/list of int(s)
List of neuron GIDs
Returns
-------
out : tuple of int(s)
List of layer GIDs
See also
--------
GetElement : Return the node(s) at the location(s) in the given layer(s).
GetPosition : Return the spatial locations of nodes.
Notes
-----
-
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_psc_alpha'})
# get layer GID of nodes in layer
tp.GetLayer(nest.GetNodes(l)[0])
"""
if not nest.is_sequence_of_gids(nodes):
raise TypeError("nodes must be a sequence of GIDs")
return topology_func('{ GetLayer } Map', nodes)
def GetElement(layers, locations):
"""
Return the node(s) at the location(s) in the given layer(s).
This function works for fixed grid layers only.
* If layers contains a single GID and locations is a single 2-element
array giving a grid location, return a list of GIDs of layer elements
at the given location.
* If layers is a list with a single GID and locations is a list of
coordinates, the function returns a list of lists with GIDs of the nodes
at all locations.
    * If layers is a list of GIDs and locations is a single 2-element array giving
a grid location, the function returns a list of lists with the GIDs of
the nodes in all layers at the given location.
* If layers and locations are lists, it returns a nested list of GIDs, one
list for each layer and each location.
Parameters
----------
layers : tuple/list of int(s)
List of layer GIDs
locations : [tuple/list of floats | tuple/list of tuples/lists of floats]
2-element list with coordinates of a single grid location,
or list of 2-element lists of coordinates for 2-dimensional layers,
i.e., on the format [column, row]
Returns
-------
out : tuple of int(s)
List of GIDs
See also
--------
GetLayer : Return the layer to which nodes belong.
FindNearestElement: Return the node(s) closest to the location(s) in the
given layer(s).
GetPosition : Return the spatial locations of nodes.
Notes
-----
-
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 4,
'elements' : 'iaf_psc_alpha'})
# get GID of element in last row and column
tp.GetElement(l, [3, 4])
"""
if not nest.is_sequence_of_gids(layers):
raise TypeError("layers must be a sequence of GIDs")
if not len(layers) > 0:
raise nest.NESTError("layers cannot be empty")
if not (nest.is_iterable(locations) and len(locations) > 0):
raise nest.NESTError(
"locations must be coordinate array or list of coordinate arrays")
# ensure that all layers are grid-based, otherwise one ends up with an
# incomprehensible error message
try:
topology_func('{ [ /topology [ /rows /columns ] ] get ; } forall',
layers)
except:
raise nest.NESTError(
"layers must contain only grid-based topology layers")
# SLI GetElement returns either single GID or list
def make_tuple(x):
if not nest.is_iterable(x):
return (x, )
else:
return x
if nest.is_iterable(locations[0]):
# layers and locations are now lists
nodes = topology_func(
'/locs Set { /lyr Set locs { lyr exch GetElement } Map } Map',
layers, locations)
node_list = tuple(
tuple(make_tuple(nodes_at_loc) for nodes_at_loc in nodes_in_lyr)
for nodes_in_lyr in nodes)
else:
# layers is list, locations is a single location
nodes = topology_func('/loc Set { loc GetElement } Map', layers,
locations)
node_list = tuple(make_tuple(nodes_in_lyr) for nodes_in_lyr in nodes)
# If only a single layer is given, un-nest list
if len(layers) == 1:
node_list = node_list[0]
return node_list
def FindNearestElement(layers, locations, find_all=False):
"""
Return the node(s) closest to the location(s) in the given layer(s).
This function works for fixed grid layers only.
* If layers contains a single GID and locations is a single 2-element
array giving a grid location, return a list of GIDs of layer elements
at the given location.
* If layers is a list with a single GID and locations is a list of
coordinates, the function returns a list of lists with GIDs of the nodes
at all locations.
    * If layers is a list of GIDs and locations is a single 2-element array giving
a grid location, the function returns a list of lists with the GIDs of
the nodes in all layers at the given location.
* If layers and locations are lists, it returns a nested list of GIDs, one
list for each layer and each location.
Parameters
----------
layers : tuple/list of int(s)
List of layer GIDs
locations : tuple(s)/list(s) of tuple(s)/list(s)
2-element list with coordinates of a single position, or list of
2-element list of positions
find_all : bool, default: False
        If `False` (default), return only the first node found among several
        nodes with the same minimal distance.
If `True`, instead of returning a single GID, return a list of GIDs
containing all nodes with minimal distance.
Returns
-------
out : tuple of int(s)
List of node GIDs
See also
--------
FindCenterElement : Return GID(s) of node closest to center of layers.
GetElement : Return the node(s) at the location(s) in the given layer(s).
GetPosition : Return the spatial locations of nodes.
Notes
-----
-
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_psc_alpha'})
# get GID of element closest to some location
tp.FindNearestElement(l, [3.0, 4.0], True)
"""
import numpy
if not nest.is_sequence_of_gids(layers):
raise TypeError("layers must be a sequence of GIDs")
if not len(layers) > 0:
raise nest.NESTError("layers cannot be empty")
if not nest.is_iterable(locations):
raise TypeError(
"locations must be coordinate array or list of coordinate arrays")
# ensure locations is sequence, keeps code below simpler
if not nest.is_iterable(locations[0]):
locations = (locations, )
result = [] # collect one list per layer
# loop over layers
for lyr in layers:
els = nest.GetChildren((lyr, ))[0]
lyr_result = []
# loop over locations
for loc in locations:
d = Distance(numpy.array(loc), els)
if not find_all:
dx = numpy.argmin(d) # finds location of one minimum
lyr_result.append(els[dx])
else:
mingids = list(els[:1])
minval = d[0]
for idx in range(1, len(els)):
if d[idx] < minval:
mingids = [els[idx]]
minval = d[idx]
elif numpy.abs(d[idx] - minval) <= 1e-14 * minval:
mingids.append(els[idx])
lyr_result.append(tuple(mingids))
result.append(tuple(lyr_result))
# If both layers and locations are multi-element lists, result shall remain
# a nested list. Otherwise, either the top or the second level is a single
# element list and we flatten.
assert (len(result) > 0)
if len(result) == 1:
assert (len(layers) == 1)
return result[0]
elif len(result[0]) == 1:
assert (len(locations) == 1)
return tuple(el[0] for el in result)
else:
return tuple(result)
def _check_displacement_args(from_arg, to_arg, caller):
"""
Internal helper function to check arguments to Displacement
and Distance and make them lists of equal length.
"""
import numpy
if isinstance(from_arg, numpy.ndarray):
from_arg = (from_arg, )
elif not (nest.is_iterable(from_arg) and len(from_arg) > 0):
raise nest.NESTError(
"%s: from_arg must be lists of GIDs or positions" % caller)
# invariant: from_arg is list
if not nest.is_sequence_of_gids(to_arg):
raise nest.NESTError("%s: to_arg must be lists of GIDs" % caller)
# invariant: from_arg and to_arg are sequences
if len(from_arg) > 1 and len(to_arg) > 1 and not len(from_arg) == len(
to_arg):
raise nest.NESTError(
"%s: If to_arg and from_arg are lists, they must have same length."
% caller)
# invariant: from_arg and to_arg have equal length,
# or (at least) one has length 1
if len(from_arg) == 1:
from_arg = from_arg * len(to_arg) # this is a no-op if len(to_arg)==1
if len(to_arg) == 1:
to_arg = to_arg * len(from_arg) # this is a no-op if len(from_arg)==1
# invariant: from_arg and to_arg have equal length
return from_arg, to_arg
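# Illustrative sketch of the broadcasting rules implemented above
# (hypothetical argument values):
#   _check_displacement_args([1], [2, 3, 4], 'Distance')
#       -> ([1, 1, 1], [2, 3, 4])      # single source repeated
#   _check_displacement_args([(0.0, 0.0)], [2], 'Distance')
#       -> ([(0.0, 0.0)], [2])         # both have length 1, returned as-is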
def Displacement(from_arg, to_arg):
"""
Get vector of lateral displacement from node(s) `from_arg`
to node(s) `to_arg`.
Displacement is always measured in the layer to which the `to_arg` node
belongs. If a node in the `from_arg` list belongs to a different layer,
its location is projected into the `to_arg` layer. If explicit positions
are given in the `from_arg` list, they are interpreted in the `to_arg`
layer.
Displacement is the shortest displacement, taking into account
periodic boundary conditions where applicable.
* If one of `from_arg` or `to_arg` has length 1, and the other is longer,
the displacement from/to the single item to all other items is given.
    * If `from_arg` and `to_arg` both have more than one element, they have
to be lists of the same length and the displacement for each pair is
returned.
Parameters
----------
from_arg : [tuple/list of int(s) | tuple/list of tuples/lists of floats]
List of GIDs or position(s)
to_arg : tuple/list of int(s)
List of GIDs
Returns
-------
out : tuple
Displacement vectors between pairs of nodes in `from_arg` and `to_arg`
See also
--------
Distance : Get lateral distances between nodes.
DumpLayerConnections : Write connectivity information to file.
GetPosition : Return the spatial locations of nodes.
Notes
-----
    * The functions ``GetPosition``, ``Displacement`` and ``Distance`` only
      work for nodes local to the current MPI process when used in an
      MPI-parallel simulation.
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_psc_alpha'})
# displacement between node 2 and 3
print(tp.Displacement([2], [3]))
        # displacement between the position (0.0, 0.0) and node 2
print(tp.Displacement([(0.0, 0.0)], [2]))
"""
from_arg, to_arg = _check_displacement_args(from_arg, to_arg,
'Displacement')
return topology_func('{ Displacement } MapThread', [from_arg, to_arg])
def Distance(from_arg, to_arg):
"""
Get lateral distances from node(s) from_arg to node(s) to_arg.
    The distance between two nodes is the length of their displacement.
Distance is always measured in the layer to which the `to_arg` node
belongs. If a node in the `from_arg` list belongs to a different layer,
its location is projected into the `to_arg` layer. If explicit positions
are given in the `from_arg` list, they are interpreted in the `to_arg`
layer.
Distance is the shortest distance, taking into account periodic boundary
conditions where applicable.
* If one of `from_arg` or `to_arg` has length 1, and the other is longer,
the displacement from/to the single item to all other items is given.
    * If `from_arg` and `to_arg` both have more than one element, they have
to be lists of the same length and the distance for each pair is
returned.
Parameters
----------
from_arg : [tuple/list of ints | tuple/list with tuples/lists of floats]
List of GIDs or position(s)
to_arg : tuple/list of ints
List of GIDs
Returns
-------
out : tuple
Distances between from and to
See also
--------
Displacement : Get vector of lateral displacements between nodes.
DumpLayerConnections : Write connectivity information to file.
GetPosition : Return the spatial locations of nodes.
Notes
-----
    * The functions ``GetPosition``, ``Displacement`` and ``Distance`` only
      work for nodes local to the current MPI process when used in an
      MPI-parallel simulation.
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_psc_alpha'})
# distance between node 2 and 3
print(tp.Distance([2], [3]))
        # distance between the position (0.0, 0.0) and node 2
print(tp.Distance([(0.0, 0.0)], [2]))
"""
from_arg, to_arg = _check_displacement_args(from_arg, to_arg, 'Distance')
return topology_func('{ Distance } MapThread', [from_arg, to_arg])
def _rank_specific_filename(basename):
"""Returns file name decorated with rank."""
if nest.NumProcesses() == 1:
return basename
else:
np = nest.NumProcesses()
np_digs = len(str(np - 1)) # for pretty formatting
rk = nest.Rank()
dot = basename.find('.')
if dot < 0:
return '%s-%0*d' % (basename, np_digs, rk)
else:
return '%s-%0*d%s' % (basename[:dot], np_digs, rk, basename[dot:])
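# For illustration (hypothetical setup): with 4 MPI processes, calling
# _rank_specific_filename('positions.txt') on rank 2 yields 'positions-2.txt',
# while a name without a suffix such as 'positions' yields 'positions-2'.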
def DumpLayerNodes(layers, outname):
"""
Write GID and position data of layer(s) to file.
Write GID and position data to layer(s) file. For each node in a layer,
a line with the following information is written:
::
GID x-position y-position [z-position]
If `layers` contains several GIDs, data for all layers will be written to a
single file.
Parameters
----------
layers : tuple/list of int(s)
List of GIDs of a Topology layer
outname : str
Name of file to write to (existing files are overwritten)
Returns
-------
out : None
See also
--------
DumpLayerConnections : Write connectivity information to file.
GetPosition : Return the spatial locations of nodes.
Notes
-----
* If calling this function from a distributed simulation, this function
will write to one file per MPI rank.
* File names are formed by adding the MPI Rank into the file name before
the file name suffix.
    * Each file stores data for the nodes local to the writing MPI rank.
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_psc_alpha'})
# write layer node positions to file
tp.DumpLayerNodes(l, 'positions.txt')
"""
topology_func("""
(w) file exch { DumpLayerNodes } forall close
""",
layers, _rank_specific_filename(outname))
def DumpLayerConnections(layers, synapse_model, outname):
"""
Write connectivity information to file.
This function writes connection information to file for all outgoing
connections from the given layers with the given synapse model.
Data for all layers in the list is combined.
For each connection, one line is stored, in the following format:
::
source_gid target_gid weight delay dx dy [dz]
where (dx, dy [, dz]) is the displacement from source to target node.
    If targets do not have positions (e.g., spike detectors outside any layer),
NaN is written for each displacement coordinate.
Parameters
----------
layers : tuple/list of int(s)
List of GIDs of a Topology layer
synapse_model : str
NEST synapse model
outname : str
Name of file to write to (will be overwritten if it exists)
Returns
-------
out : None
See also
--------
DumpLayerNodes : Write layer node positions to file.
GetPosition : Return the spatial locations of nodes.
nest.GetConnections : Return connection identifiers between
sources and targets
Notes
-----
* If calling this function from a distributed simulation, this function
will write to one file per MPI rank.
* File names are formed by inserting
the MPI Rank into the file name before the file name suffix.
* Each file stores data for local nodes.
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_psc_alpha'})
tp.ConnectLayers(l,l, {'connection_type': 'divergent',
'synapse_model': 'static_synapse'})
# write connectivity information to file
tp.DumpLayerConnections(l, 'static_synapse', 'connections.txt')
"""
topology_func("""
/oname Set
cvlit /synmod Set
/lyrs Set
oname (w) file lyrs
{ synmod DumpLayerConnections } forall close
""",
layers, synapse_model, _rank_specific_filename(outname))
def FindCenterElement(layers):
"""
Return GID(s) of node closest to center of layers.
Parameters
----------
layers : tuple/list of int(s)
List of layer GIDs
Returns
-------
out : tuple of int(s)
A list containing for each layer the GID of the node closest to the
center of the layer, as specified in the layer parameters. If several
nodes are equally close to the center, an arbitrary one of them is
returned.
See also
--------
FindNearestElement : Return the node(s) closest to the location(s) in the
given layer(s).
GetElement : Return the node(s) at the location(s) in the given layer(s).
GetPosition : Return the spatial locations of nodes.
Notes
-----
-
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_psc_alpha'})
# get GID of the element closest to the center of the layer
tp.FindCenterElement(l)
"""
if not nest.is_sequence_of_gids(layers):
raise TypeError("layers must be a sequence of GIDs")
# Do each layer on its own since FindNearestElement does not thread
return tuple(FindNearestElement((lyr, ),
nest.GetStatus((lyr, ), 'topology')[0][
'center'])[0]
for lyr in layers)
def GetTargetNodes(sources, tgt_layer, tgt_model=None, syn_model=None):
"""
Obtain targets of a list of sources in given target layer.
Parameters
----------
sources : tuple/list of int(s)
List of GID(s) of source neurons
tgt_layer : tuple/list of int(s)
Single-element list with GID of tgt_layer
tgt_model : [None | str], optional, default: None
Return only target positions for a given neuron model.
syn_model : [None | str], optional, default: None
Return only target positions for a given synapse model.
Returns
-------
out : tuple of list(s) of int(s)
List of GIDs of target neurons fulfilling the given criteria.
It is a list of lists, one list per source.
For each neuron in `sources`, this function finds all target elements
in `tgt_layer`. If `tgt_model` is not given (default), all targets are
returned, otherwise only targets of specific type, and similarly for
syn_model.
See also
--------
GetTargetPositions : Obtain positions of targets of a list of sources in a
given target layer.
nest.GetConnections : Return connection identifiers between
sources and targets
Notes
-----
* For distributed simulations, this function only returns targets on the
local MPI process.
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 11,
'columns' : 11,
'extent' : [11.0, 11.0],
'elements' : 'iaf_psc_alpha'})
# connectivity specifications with a mask
conndict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left' : [-2.0, -1.0],
'upper_right': [2.0, 1.0]}}}
# connect layer l with itself according to the given
# specifications
tp.ConnectLayers(l, l, conndict)
# get the GIDs of the targets of the source neuron with GID 5
tp.GetTargetNodes([5], l)
"""
if not nest.is_sequence_of_gids(sources):
raise TypeError("sources must be a sequence of GIDs")
if not nest.is_sequence_of_gids(tgt_layer):
raise TypeError("tgt_layer must be a sequence of GIDs")
if len(tgt_layer) != 1:
raise nest.NESTError("tgt_layer must be a one-element list")
with nest.SuppressedDeprecationWarning('GetLeaves'):
# obtain local nodes in target layer, to pass to GetConnections
tgt_nodes = nest.GetLeaves(tgt_layer,
properties={'model': tgt_model}
if tgt_model else None,
local_only=True)[0]
conns = nest.GetConnections(sources, tgt_nodes, synapse_model=syn_model)
# conns is a flat list of connections.
# Re-organize into one list per source, containing only target GIDs.
src_tgt_map = dict((sgid, []) for sgid in sources)
for conn in conns:
src_tgt_map[conn[0]].append(conn[1])
# convert dict to nested list in same order as sources
return tuple(src_tgt_map[sgid] for sgid in sources)
def GetTargetPositions(sources, tgt_layer, tgt_model=None, syn_model=None):
"""
Obtain positions of targets of a list of sources in a given target layer.
Parameters
----------
sources : tuple/list of int(s)
List of GID(s) of source neurons
tgt_layer : tuple/list of int(s)
Single-element list with GID of tgt_layer
tgt_model : [None | str], optional, default: None
Return only target positions for a given neuron model.
    syn_model : [None | str], optional, default: None
Return only target positions for a given synapse model.
Returns
-------
out : tuple of tuple(s) of tuple(s) of floats
Positions of target neurons fulfilling the given criteria as a nested
list, containing one list of positions per node in sources.
For each neuron in `sources`, this function finds all target elements
in `tgt_layer`. If `tgt_model` is not given (default), all targets are
returned, otherwise only targets of specific type, and similarly for
syn_model.
See also
--------
GetTargetNodes : Obtain targets of a list of sources in a given target
layer.
Notes
-----
* For distributed simulations, this function only returns targets on the
local MPI process.
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 11,
'columns' : 11,
'extent' : [11.0, 11.0],
'elements' : 'iaf_psc_alpha'})
# connectivity specifications with a mask
conndict1 = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left' : [-2.0, -1.0],
'upper_right' : [2.0, 1.0]}}}
# connect layer l with itself according to the given
# specifications
tp.ConnectLayers(l, l, conndict1)
# get the positions of the targets of the source neuron with GID 5
tp.GetTargetPositions([5], l)
"""
return tuple(GetPosition(nodes) for nodes
in GetTargetNodes(sources, tgt_layer, tgt_model, syn_model))
def _draw_extent(ax, xctr, yctr, xext, yext):
"""Draw extent and set aspect ration, limits"""
import matplotlib.pyplot as plt
# thin gray line indicating extent
llx, lly = xctr - xext / 2.0, yctr - yext / 2.0
urx, ury = llx + xext, lly + yext
ax.add_patch(
plt.Rectangle((llx, lly), xext, yext, fc='none', ec='0.5', lw=1,
zorder=1))
# set limits slightly outside extent
ax.set(aspect='equal',
xlim=(llx - 0.05 * xext, urx + 0.05 * xext),
ylim=(lly - 0.05 * yext, ury + 0.05 * yext),
xticks=tuple(), yticks=tuple())
def PlotLayer(layer, fig=None, nodecolor='b', nodesize=20):
"""
Plot all nodes in a layer.
This function plots only top-level nodes, not the content of composite
nodes.
Parameters
----------
layer : tuple/list of int(s)
GID of layer to plot, must be tuple/list of length 1
fig : [None | matplotlib.figure.Figure object], optional, default: None
Matplotlib figure to plot to. If not given, a new figure is
created.
nodecolor : [None | any matplotlib color], optional, default: 'b'
Color for nodes
nodesize : float, optional, default: 20
Marker size for nodes
Returns
-------
out : `matplotlib.figure.Figure` object
See also
--------
PlotKernel : Add indication of mask and kernel to axes.
PlotTargets : Plot all targets of a given source.
matplotlib.figure.Figure : matplotlib Figure class
Notes
-----
* Do not use this function in distributed simulations.
**Example**
::
import nest.topology as tp
import matplotlib.pyplot as plt
# create a layer
l = tp.CreateLayer({'rows' : 11,
'columns' : 11,
'extent' : [11.0, 11.0],
'elements' : 'iaf_psc_alpha'})
# plot layer with all its nodes
tp.PlotLayer(l)
plt.show()
"""
import matplotlib.pyplot as plt
if len(layer) != 1:
raise ValueError("layer must contain exactly one GID.")
# get layer extent
ext = nest.GetStatus(layer, 'topology')[0]['extent']
if len(ext) == 2:
# 2D layer
# get layer extent and center, x and y
xext, yext = ext
xctr, yctr = nest.GetStatus(layer, 'topology')[0]['center']
with nest.SuppressedDeprecationWarning('GetChildren'):
# extract position information, transpose to list of x and y pos
xpos, ypos = zip(*GetPosition(nest.GetChildren(layer)[0]))
if fig is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = fig.gca()
ax.scatter(xpos, ypos, s=nodesize, facecolor=nodecolor,
edgecolor='none')
_draw_extent(ax, xctr, yctr, xext, yext)
elif len(ext) == 3:
# 3D layer
from mpl_toolkits.mplot3d import Axes3D
with nest.SuppressedDeprecationWarning('GetChildren'):
# extract position information, transpose to list of x,y,z pos
pos = zip(*GetPosition(nest.GetChildren(layer)[0]))
if fig is None:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
else:
ax = fig.gca()
ax.scatter3D(*pos, s=nodesize, facecolor=nodecolor, edgecolor='none')
plt.draw_if_interactive()
else:
raise nest.NESTError("unexpected dimension of layer")
return fig
def PlotTargets(src_nrn, tgt_layer, tgt_model=None, syn_type=None, fig=None,
mask=None, kernel=None,
src_color='red', src_size=50, tgt_color='blue', tgt_size=20,
mask_color='red', kernel_color='red'):
"""
Plot all targets of source neuron `src_nrn` in a target layer `tgt_layer`.
Parameters
----------
src_nrn : int
GID of source neuron (as single-element list)
tgt_layer : tuple/list of int(s)
GID of tgt_layer (as single-element list)
tgt_model : [None | str], optional, default: None
Show only targets of a given model.
syn_type : [None | str], optional, default: None
Show only targets connected to with a given synapse type
fig : [None | matplotlib.figure.Figure object], optional, default: None
Matplotlib figure to plot to. If not given, a new figure is created.
mask : [None | dict], optional, default: None
Draw topology mask with targets; see ``PlotKernel`` for details.
kernel : [None | dict], optional, default: None
Draw topology kernel with targets; see ``PlotKernel`` for details.
src_color : [None | any matplotlib color], optional, default: 'red'
Color used to mark source node position
src_size : float, optional, default: 50
Size of source marker (see scatter for details)
tgt_color : [None | any matplotlib color], optional, default: 'blue'
Color used to mark target node positions
tgt_size : float, optional, default: 20
Size of target markers (see scatter for details)
mask_color : [None | any matplotlib color], optional, default: 'red'
Color used for line marking mask
kernel_color : [None | any matplotlib color], optional, default: 'red'
Color used for lines marking kernel
Returns
-------
out : matplotlib.figure.Figure object
See also
--------
GetTargetNodes : Obtain targets of a list of sources in a given target
layer.
GetTargetPositions : Obtain positions of targets of a list of sources in a
given target layer.
PlotKernel : Add indication of mask and kernel to axes.
PlotLayer : Plot all nodes in a layer.
matplotlib.pyplot.scatter : matplotlib scatter plot.
Notes
-----
* Do not use this function in distributed simulations.
**Example**
::
import nest.topology as tp
import matplotlib.pyplot as plt
# create a layer
l = tp.CreateLayer({'rows' : 11,
'columns' : 11,
'extent' : [11.0, 11.0],
'elements' : 'iaf_psc_alpha'})
# connectivity specifications with a mask
conndict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left' : [-2.0, -1.0],
'upper_right' : [2.0, 1.0]}}}
# connect layer l with itself according to the given
# specifications
tp.ConnectLayers(l, l, conndict)
# plot the targets of the source neuron with GID 5
tp.PlotTargets([5], l)
plt.show()
"""
import matplotlib.pyplot as plt
# get position of source
srcpos = GetPosition(src_nrn)[0]
# get layer extent and center, x and y
ext = nest.GetStatus(tgt_layer, 'topology')[0]['extent']
if len(ext) == 2:
# 2D layer
# get layer extent and center, x and y
xext, yext = ext
xctr, yctr = nest.GetStatus(tgt_layer, 'topology')[0]['center']
if fig is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = fig.gca()
# get positions, reorganize to x and y vectors
tgtpos = GetTargetPositions(src_nrn, tgt_layer, tgt_model, syn_type)
if tgtpos:
xpos, ypos = zip(*tgtpos[0])
ax.scatter(xpos, ypos, s=tgt_size, facecolor=tgt_color,
edgecolor='none')
ax.scatter(srcpos[:1], srcpos[1:], s=src_size, facecolor=src_color,
edgecolor='none',
alpha=0.4, zorder=-10)
_draw_extent(ax, xctr, yctr, xext, yext)
if mask is not None or kernel is not None:
PlotKernel(ax, src_nrn, mask, kernel, mask_color, kernel_color)
else:
# 3D layer
from mpl_toolkits.mplot3d import Axes3D
if fig is None:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
else:
ax = fig.gca()
# get positions, reorganize to x,y,z vectors
tgtpos = GetTargetPositions(src_nrn, tgt_layer, tgt_model, syn_type)
if tgtpos:
xpos, ypos, zpos = zip(*tgtpos[0])
ax.scatter3D(xpos, ypos, zpos, s=tgt_size, facecolor=tgt_color,
edgecolor='none')
ax.scatter3D(srcpos[:1], srcpos[1:2], srcpos[2:], s=src_size,
facecolor=src_color, edgecolor='none',
alpha=0.4, zorder=-10)
plt.draw_if_interactive()
return fig
def PlotKernel(ax, src_nrn, mask, kern=None, mask_color='red',
kernel_color='red'):
"""
Add indication of mask and kernel to axes.
    Adds a solid line for the mask (for a doughnut mask, both the inner and
    the outer border are drawn). If `kern` is Gaussian, dashed circles marking
    1, 2 and 3 sigma are added; line colors follow `mask_color` and
    `kernel_color`.
This function ignores periodic boundary conditions.
Usually, this function is invoked by ``PlotTargets``.
Parameters
----------
ax : matplotlib.axes.AxesSubplot,
subplot reference returned by PlotTargets
src_nrn : int
GID of source neuron (as single element list), mask and kernel
plotted relative to it
mask : dict
Mask used in creating connections.
kern : [None | dict], optional, default: None
Kernel used in creating connections
mask_color : [None | any matplotlib color], optional, default: 'red'
Color used for line marking mask
kernel_color : [None | any matplotlib color], optional, default: 'red'
Color used for lines marking kernel
Returns
-------
out : None
See also
--------
CreateMask : Create a ``Mask`` object. Documentation on available spatial
masks.
CreateParameter : Create a ``Parameter`` object. Documentation on available
parameters for distance dependency and randomization.
PlotLayer : Plot all nodes in a layer.
Notes
-----
* Do not use this function in distributed simulations.
**Example**
::
import nest.topology as tp
import matplotlib.pyplot as plt
# create a layer
l = tp.CreateLayer({'rows' : 11,
'columns' : 11,
'extent' : [11.0, 11.0],
'elements' : 'iaf_psc_alpha'})
# connectivity specifications
mask_dict = {'rectangular': {'lower_left' : [-2.0, -1.0],
'upper_right' : [2.0, 1.0]}}
kernel_dict = {'gaussian': {'p_center' : 1.0,
'sigma' : 1.0}}
conndict = {'connection_type': 'divergent',
'mask' : mask_dict,
'kernel' : kernel_dict}
# connect layer l with itself according to the given
# specifications
tp.ConnectLayers(l, l, conndict)
# set up figure
fig, ax = plt.subplots()
# plot layer nodes
tp.PlotLayer(l, fig)
# choose center element of the layer as source node
ctr_elem = tp.FindCenterElement(l)
# plot mask and kernel of the center element
tp.PlotKernel(ax, ctr_elem, mask=mask_dict, kern=kernel_dict)
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# minimal checks for ax having been created by PlotKernel
if ax and not isinstance(ax, matplotlib.axes.Axes):
raise ValueError('ax must be matplotlib.axes.Axes instance.')
srcpos = np.array(GetPosition(src_nrn)[0])
if 'anchor' in mask:
offs = np.array(mask['anchor'])
else:
offs = np.array([0., 0.])
if 'circular' in mask:
r = mask['circular']['radius']
ax.add_patch(plt.Circle(srcpos + offs, radius=r, zorder=-1000,
fc='none', ec=mask_color, lw=3))
elif 'doughnut' in mask:
r_in = mask['doughnut']['inner_radius']
r_out = mask['doughnut']['outer_radius']
ax.add_patch(plt.Circle(srcpos + offs, radius=r_in, zorder=-1000,
fc='none', ec=mask_color, lw=3))
ax.add_patch(plt.Circle(srcpos + offs, radius=r_out, zorder=-1000,
fc='none', ec=mask_color, lw=3))
elif 'rectangular' in mask:
ll = mask['rectangular']['lower_left']
ur = mask['rectangular']['upper_right']
ax.add_patch(
plt.Rectangle(srcpos + ll + offs, ur[0] - ll[0], ur[1] - ll[1],
zorder=-1000, fc='none', ec=mask_color, lw=3))
elif 'elliptical' in mask:
width = mask['elliptical']['major_axis']
height = mask['elliptical']['minor_axis']
if 'azimuth_angle' in mask['elliptical']:
angle = mask['elliptical']['azimuth_angle']
else:
angle = 0.0
if 'anchor' in mask['elliptical']:
anchor = mask['elliptical']['anchor']
else:
anchor = [0., 0.]
ax.add_patch(
matplotlib.patches.Ellipse(srcpos + offs + anchor, width, height,
angle=angle, zorder=-1000, fc='none',
ec=mask_color, lw=3))
else:
raise ValueError(
'Mask type cannot be plotted with this version of PyTopology.')
if kern is not None and isinstance(kern, dict):
if 'gaussian' in kern:
sigma = kern['gaussian']['sigma']
for r in range(3):
ax.add_patch(plt.Circle(srcpos + offs, radius=(r + 1) * sigma,
zorder=-1000,
fc='none', ec=kernel_color, lw=3,
ls='dashed'))
else:
raise ValueError('Kernel type cannot be plotted with this ' +
'version of PyTopology')
plt.draw()
def SelectNodesByMask(layer, anchor, mask_obj):
"""
Obtain the GIDs inside a masked area of a topology layer.
The function finds and returns all the GIDs inside a given mask of a single
layer. It works on both 2-dimensional and 3-dimensional masks and layers.
All mask types are allowed, including combined masks.
Parameters
----------
layer : tuple/list of int
List containing the single layer to select nodes from.
anchor : tuple/list of double
List containing center position of the layer. This is the point from
where we start to search.
mask_obj: object
Mask object specifying chosen area.
Returns
-------
out : list of int(s)
GID(s) of nodes/elements inside the mask.
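    **Example**
    A minimal usage sketch; layer size, anchor and mask are illustrative
    assumptions:
    ::
        import nest.topology as tp
        l = tp.CreateLayer({'rows'     : 5,
                            'columns'  : 5,
                            'elements' : 'iaf_psc_alpha'})
        mask = tp.CreateMask('rectangular', {'lower_left' : [-0.2, -0.2],
                                             'upper_right': [ 0.2,  0.2]})
        # GIDs of the nodes of l that fall inside the mask around the origin
        tp.SelectNodesByMask(l, [0.0, 0.0], mask)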
"""
if len(layer) != 1:
raise ValueError("layer must contain exactly one GID.")
mask_datum = mask_obj._datum
gid_list = topology_func('SelectNodesByMask', layer[0], anchor, mask_datum)
return gid_list
| gpl-2.0 |
heli522/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10-fold cross-validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
estimators are built sequentially and so cannot use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
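# The nested loops below produce a 3 x 4 grid of subplots: one row per
# feature pair, one column per model in the `models` list above.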
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
        Z_points_coarser = clf.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |
arianna-bis/glass-box-nmt | plots/languageplots.py | 1 | 5684 | """
========
Barchart
========
Bar plots with error bars comparing prediction accuracy of linguistic features across language pairs.
"""
import numpy as np
import matplotlib.pyplot as plt
base = {}
baseStd = {}
acc = {}
acc['embed'] = {}
acc['lstmo'] = {}
std = {}
std['embed'] = {}
std['lstmo'] = {}
# LANGS: FR-IT, FR-DE, FR-EN
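# Each 3-tuple of accuracies/stds below follows the same language-pair order:
# (FR-IT, FR-DE, FR-EN). The 4-tuples for 'genITM' add the FR-IT* variant
# (the 'itNoMasc' second language) in second position.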
base['gen'] = 0.5030
base['num'] = 0.6968
base['Per'] = 0.6141
base['Ten'] = 0.7629
base['Moo'] = 0.2450
base['genITM'] = base['gen']
base['avgAllFeats'] = np.mean([base['gen'],base['num'],base['Per'],base['Ten'],base['Moo']])
base['genNumTen'] = np.mean([base['gen'],base['num'],base['Ten']])
baseStd['gen'] = 0.0043
baseStd['num'] = 0.0073
baseStd['Per'] = 0.0392
baseStd['Ten'] = 0.0238
baseStd['Moo'] = 0.0504
baseStd['genITM'] = baseStd['gen']
#baseStd['avgAllFeats'] = np.mean([baseStd['gen'],baseStd['num'],baseStd['Per'],baseStd['Ten'],baseStd['Moo']])
baseStd['avgAllFeats'] = 0 ## HACK!
baseStd['genNumTen'] = 0 ## HACK!
#gender
acc['embed']['gen'] = (0.5804, 0.5304, 0.5085)
std['embed']['gen'] = (0.0272, 0.0321, 0.0357)
#gender with itNoMasc (2nd lang)
acc['embed']['genITM'] = (0.5804, 0.5196, 0.5304, 0.5085)
std['embed']['genITM'] = (0.0272, 0.0226, 0.0321, 0.0357)
# number
acc['embed']['num'] = (0.6804, 0.6623, 0.6563)
std['embed']['num'] = (0.0131, 0.0106, 0.0184)
# person
acc['embed']['Per'] = (0.5648, 0.5789, 0.6017)
std['embed']['Per'] = (0.0984, 0.0493, 0.0405)
# tense
acc['embed']['Ten'] = (0.7219, 0.7090, 0.7483)
std['embed']['Ten'] = (0.0051, 0.0466, 0.0073)
# mood
acc['embed']['Moo'] = (0.4752,0.4515, 0.4908)
std['embed']['Moo'] = (0.0370, 0.0640, 0.0250)
#
# all features averaged
layer = 'embed'
acc_array = []
for L in range(3):
acc_array.append(np.mean([acc[layer]['gen'][L],acc[layer]['num'][L],acc[layer]['Per'][L],acc[layer]['Ten'][L],acc[layer]['Moo'][L]]))
acc[layer]['avgAllFeats'] = acc_array
print(acc[layer]['avgAllFeats'])
acc_array = []
for L in range(3):
acc_array.append(np.mean([acc[layer]['gen'][L],acc[layer]['num'][L],acc[layer]['Ten'][L]]))
acc[layer]['genNumTen'] = acc_array
print(acc[layer]['genNumTen'])
# std_array = []
# for L in range(3):
# std_array.append(np.mean([std[layer]['gen'][L],std[layer]['num'][L],std[layer]['Per'][L],std[layer]['Ten'][L],std[layer]['Moo'][L]]))
# std[layer]['avgAllFeats'] = std_array
#print(std[layer]['avgAllFeats'])
std[layer]['avgAllFeats'] = (0,0,0) # HACK!
std[layer]['genNumTen'] = (0,0,0) # HACK!
#gender
acc['lstmo']['gen'] = (0.8045,0.6505,0.5949)
std['lstmo']['gen'] = (0.0094,0.0228,0.0106)
#gender with itNoMasc (2nd lang)
acc['lstmo']['genITM'] = (0.8045,0.6191,0.6505,0.5949)
std['lstmo']['genITM'] = (0.0094,0.0175,0.0228,0.0106)
#number
acc['lstmo']['num'] = (0.9413, 0.9463, 0.9278)
std['lstmo']['num'] = (0.0016,0.0036, 0.0050)
#person
acc['lstmo']['Per'] = (0.6777, 0.6727, 0.6888)
std['lstmo']['Per'] = (0.0329, 0.0297, 0.0220)
# tense
acc['lstmo']['Ten'] = (0.9019, 0.8880, 0.8897)
std['lstmo']['Ten'] = (0.0080, 0.0086, 0.0169)
#mood
acc['lstmo']['Moo'] = (0.8182, 0.8070, 0.8041)
std['lstmo']['Moo'] = (0.0067, 0.0126, 0.0240)
#
# all features averaged
layer = 'lstmo'
acc_array = []
for L in range(3):
acc_array.append(np.mean([acc[layer]['gen'][L],acc[layer]['num'][L],acc[layer]['Per'][L],acc[layer]['Ten'][L],acc[layer]['Moo'][L]]))
acc[layer]['avgAllFeats'] = acc_array
print(acc[layer]['avgAllFeats'])
acc_array = []
for L in range(3):
acc_array.append(np.mean([acc[layer]['gen'][L],acc[layer]['num'][L],acc[layer]['Ten'][L]]))
acc[layer]['genNumTen'] = acc_array
print(acc[layer]['genNumTen'])
# std_array = []
# for L in range(3):
# std_array.append(np.mean([std[layer]['gen'][L],std[layer]['num'][L],std[layer]['Per'][L],std[layer]['Ten'][L],std[layer]['Moo'][L]]))
# std[layer]['avgAllFeats'] = std_array
#print(std[layer]['avgAllFeats'])
std[layer]['avgAllFeats'] = (0,0,0) # HACK!
std[layer]['genNumTen'] = (0,0,0) # HACK!
#############
#############
feats = ['gen','num','Per','Ten','Moo','avgAllFeats','genITM','genNumTen']
featNames = ['Gender','Number','Person','Tense','Mood','All 5 Features','Gender', 'All Features']
#for i in range(6):
for i in range(7,8): # only genNumTen
feat = feats[i]
featName = featNames[i]
    N = 3  # number of language pairs plotted: FR-IT, FR-DE, FR-EN
if feat == 'genITM':
N = 4
ind = np.arange(N) # the x locations for the groups
width = 0.3 # the width of the bars
fig, ax = plt.subplots()
colors1 = ('#9999FF')
colors2 = ('#0000FF')
#if feat == 'genITM': # use diff color for Fr-ITnoMasc
# colors1 = ('#9999FF','#85e085','#9999FF','#9999FF')
# colors2 = ('#0000FF','#248F24','#0000FF','#0000FF')
rects0 = ax.bar(0.5*width, base[feat], width, color='#FF9900', yerr=baseStd[feat])
rects1 = ax.bar(2.5*width+ind+0.5*width, acc['embed'][feat], width, color=colors1, yerr=std['embed'][feat])
rects2 = ax.bar(2.5*width+ind+0.5*width + width, acc['lstmo'][feat], width, color=colors2, yerr=std['lstmo'][feat])
# add some text for labels, title and axes ticks
ax.set_ylabel('Prediction Accuracy',size=12)
ax.set_title(featName + ' prediction',size=16)
xticks = (np.arange(N+1) + 0.05)
xticks[0] = width/2
#ax.set_xticks(width/2, np.arange(N) + width / 2)
ax.set_xticks(xticks)
ax.set_xticklabels(('majClass', 'FR-IT', 'FR-DE', 'FR-EN'))
if feat == 'genITM':
ax.set_xticklabels(('majClass', 'FR-IT', 'FR-IT*', 'FR-DE', 'FR-EN'))
ax.set_ylim(0.2,1)
ax.legend((rects1[0], rects2[0]), ('Word embedding', 'LSTM state'))
filename = feat + '_byLang.pdf'
plt.savefig(filename, bbox_inches='tight')
#plt.show()
| mit |
asalomatov/variants | variants/work/extractFeaturesFromVCF.py | 1 | 19408 | import sys
sys.path.insert(0, '/mnt/xfs1/home/asalomatov/projects/variants/variants')
import func
import variants
import ped
import features_vcf as fv
import pandas as pd
import features_vcf
import numpy as np
import os
import sklearn
variants = reload(variants)
func = reload(func)
ped = reload(ped)
###SSC
infile_ped = '/mnt/scratch/asalomatov/data/SSC/SSCped/SSC.ped'
myped = ped.Ped(infile_ped, ['collection'])
myped.getParents(11006)
#myped.addVcf()
myped.addVcf(file_pat = '/mnt/scratch/asalomatov/data/SSC/vcf/raw/%s.family.vqsr.sorted.vcf.gz')
myped.ped.head()
###Columbia
infile_ped = '/mnt/scratch/asalomatov/data/columbia/pcgc_ped.txt'
myped = ped.Ped(infile_ped, [])
myped.getParents('1-00034')
myped.getFather('1-00034')
myped.ped.head()
myped.ped.shape
myped.addVcf(file_pat = '/mnt/scratch/asalomatov/data/columbia/vcf/deco/%s_%s-02_%s-01.annotated-deco.vcf.gz')
sum(myped.ped.vcf.notnull())
infile_vcf = '/mnt/scratch/asalomatov/SSC_rerun/denovo_analysis/rerun200fam/11006-HC-pm50-ann.vcf.gz'
#infile_vcf = '/mnt/scratch/asalomatov/data/columbia/vcf/deco/1-03173_1-03173-02_1-03173-01.annotated-deco.vcf.gz'
myvars = variants.Variants(infile_vcf, '11006')
myvars.readFromVcf()
myvars.samples
record = myvars.vcf_reader.next()
record.samples
myvars._colNamesFormat()
myvars.describeFormatFields()
###features
###extract all variants from rerun to be tested
infile_ped = '/mnt/scratch/asalomatov/data/SSC/SSCped/SSC.ped'
myped = ped.Ped(infile_ped, ['collection'])
myped.getParents(11006)
myped.addVcf()
fv = reload(features_vcf)
train_set = "/mnt/scratch/asalomatov/data/SSC/SSCdeNovoCalls/ssc_exome_all_snp.txt"
ftrs = fv.FeaturesVcf(myped, train_set)
ftrs.variants.head()
df_l = []
for f in fam_trio:
print f
fv = reload(features_vcf)
train_set = "/mnt/scratch/asalomatov/data/SSC/SSCdeNovoCalls/ssc_exome_all_snp.txt"
ftrs = fv.FeaturesVcf(myped, train_set)
if not os.path.isfile('/mnt/scratch/asalomatov/data/SSC/vcf/raw/' + str(f) +'.family.vqsr.sorted.vcf.gz'):
continue
df = ftrs._fileHasVariant('/mnt/scratch/asalomatov/data/SSC/vcf/raw/' + str(f) +'.family.vqsr.sorted.vcf.gz', fam_id=f, ind_id=str(f)+'.p1', chrom=None, pos_start=None, pos_end=None)
df_l.append(df)
len(df_l)
df_test = pd.concat(df_l)
type(df_test)
df_test.isnull().sum()
df_test.shape
df_test.offspring_gt_type.value_counts()
c1 = df_test.vartype.isin(['snp'])
sum(c1)
c3 = df_test['offspring_gt_type'] == 0
df_test = df_test[c1 & ~c3]
df_test.shape
df_test = addVar(df_test, 'ind_id')
ftrs.variants = addVar(ftrs.variants, 'ind_id')
df_test = pd.merge(df_test, ftrs.variants[['var', 'status']], how='left', on='var')
df_test.status.value_counts()
ftrs.variants.status.value_counts()
ftrs.variants.head()
df_test = df_test.dropna(subset=['format_father_0_PL'])
df_test = df_test.dropna(subset=['format_father_1_PL'])
df_test = df_test.dropna(subset=['format_father_2_PL'])
df_test.shape
df_test = df_test.dropna(subset=['format_mother_0_PL'])
df_test = df_test.dropna(subset=['format_mother_1_PL'])
df_test = df_test.dropna(subset=['format_mother_2_PL'])
df_test.shape
df_test = df_test.dropna(subset=['format_offspring_0_PL'])
df_test = df_test.dropna(subset=['format_offspring_1_PL'])
df_test = df_test.dropna(subset=['format_offspring_2_PL'])
df_test.shape
df_test.isnull().sum()
a = pd.DataFrame(ftrs.variants.ind_id.str.split('.',1).tolist(), columns=['fam_id', 'memb'], index=ftrs.variants.index)
ftrs.variants = ftrs.variants.join(a)
ftrs.variants.head()
ftrs.variants.dtypes
ftrs.variants['fam_id'] = ftrs.variants['fam_id'].astype(int)
sum(ftrs.variants.fam_id.isin(fam_trio))
df_test_num = df2sklearn(df_test)
df_test_num.isnull().sum()
df_test_num.head()
df_test_num.dtypes
df_train_set_num = pd.concat([df_train_num, df_neg_num])
df_train_set_num.head()
df_train_set_num.dtypes
df_train_set_num.shape
df_train_set_num.status01.value_counts()
df_train_set_num.isnull().sum()
df_train_set_num = df_train_set_num.dropna()
ftrs.variants.status.value_counts()
### extract some negative examples
train_set = "/mnt/scratch/asalomatov/data/SSC/SSCdeNovoCalls/ssc_exome_verified_snp.txt"
train_set = "/mnt/scratch/asalomatov/data/SSC/SSCdeNovoCalls/ssc_exome_all_snp.txt"
ftrs = fv.FeaturesVcf(myped, train_set)
ftrs.variants.head()
ftrs.variants.status.value_counts()
ftrs.variants.groupby(['descr', 'status']).apply(len)
df_l = []
for f in df_train['family_id'][:5]:
print f
train_set = "/mnt/scratch/asalomatov/data/SSC/SSCdeNovoCalls/ssc_exome_verified_snp.txt"
ftrs = fv.FeaturesVcf(myped, train_set)
df = ftrs._fileHasVariant('/mnt/scratch/asalomatov/data/SSC/vcf/raw/' + str(f) +'.family.vqsr.sorted.vcf.gz',
fam_id=f, ind_id=str(f)+'.p1', chrom=None, pos_start=None, pos_end=None)
df_l.append(df)
df = pd.concat(df_l)
type(df)
df.isnull().sum()
df.reset_index(drop=True, inplace=True)
df.isnull().sum()
df.head()
df.shape
df.columns
df.FILTER.value_counts()
df_neg = df
#get some negative examples
#low quality snp
c1 = df.vartype.isin(['snp'])
sum(c1)
c2 = df.FILTER.isin(['PASS'])
sum(c1 & ~c2)
df_lo_qual_snp = df[c1 & ~ c2]
df_lo_qual_snp.isnull().sum()
#child is homRef
df.offspring_gt_type.value_counts()
c1 = df.vartype.isin(['snp'])
sum(c1)
c2 = df.FILTER.isin(['PASS'])
c3 = df['offspring_gt_type'] == 0
df_p1_homref = df[c1 & c2 & c3]
df_p1_homref.isnull().sum()
df_p1_homref.reset_index(drop=True, inplace=True)
N = len(df_p1_homref.index) - 1
N
N_rows = list(set(np.random.randint(low=0, high=N, size=1000)))
len(N_rows)
len(set(N_rows))
df_p1_homref = df_p1_homref.ix[N_rows]
df_p1_homref.shape
df_p1_homref.isnull().sum()
###concat negative examples
df_neg = pd.concat([df_lo_qual_snp, df_p1_homref])
df_neg.shape
df_neg.isnull().sum()
df_neg['status'] = 'N'
df_neg.columns
### annotate training set with features from vcf files
import os
import sys
sys.path.insert(0, '/mnt/xfs1/home/asalomatov/projects/variants/variants')
import variants
import ped
import func
import features_vcf as fv
import pandas as pd
import numpy as np
ped_file = '/mnt/scratch/asalomatov/data/SSC/SSCped/SSC.ped'
myped = ped.Ped(ped_file, ['collection'])
myped.addVcf(file_pat = '/mnt/scratch/asalomatov/data/SSC/wes/vcf/raw/%s.family.vqsr.sorted.vcf.gz')
myped.ped.head()
dnv_file = "/mnt/scratch/asalomatov/data/SSC/SSCdeNovoCalls/ssc_denovo_clean_snp.tsv"
ftrs = fv.FeaturesVcf(myped, dnv_file)
ftrs.ped.ped.head()
ftrs.variants.head()
ftrs.variants.chr.value_counts()
ftrs.variants.shape
df_train = ftrs.extractFeatures()
len(df_train)
df_train = pd.concat(df_train)
df_train.shape
df_train.head()
df_train.columns
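# Helper: build a per-sample variant key of the form <ind_id>_<pos>_<ref>_<alt>
# (handling upper- or lower-case column names), used below to merge on 'var'.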
def addVar(df, field):
try: df['var'] = df[field].map(str) + '_' + df.POS.map(str) + '_' + df.REF.map(str) + '_' + df.ALT.map(str)
except: df['var'] = df[field].map(str) + '_' + df.pos.map(str) + '_' + df.ref.map(str) + '_' + df.alt.map(str)
return df
# join to add status column
df_train = addVar(df_train, 'ind_id')
ftrs.variants = addVar(ftrs.variants, 'ind_id')
ftrs.variants.status.value_counts()
df_train = pd.merge(df_train, ftrs.variants[['var', 'status']], how='left', on='var')
df_train.status.value_counts()
df_train.status.isnull().sum()
df_train = df_train[~df_train.status.isnull()]
df_train['status'][df_train['status'] == 'ND'] = 'Y'
df_train.to_csv("/mnt/scratch/asalomatov/data/SSC/SSCdeNovoCalls/ssc_denovo_clean_snp_Kr_GATK.tsv", sep="\t", index=False)
df_neg = addVar(df_neg, 'ind_id')
df_neg = pd.merge(df_neg, ftrs.variants[['var', 'status']], how='left', on='var')
df_neg.status.value_counts()
df_neg.shape
df_neg = df_neg[df_neg.status.isnull()]
df_neg.shape
df_neg.status = 'N'
df_train.tail()
df_train.shape
fam_trio = [11193, 11195, 11198, 11827, 13415, 11989, 13733, 11055, 11056, 11545, 11303, 12073,
12521, 11660, 11388, 11262, 11707, 13008, 12933, 13844, 11184, 11834, 12437, 12430,
11109, 12532, 11023, 11375, 13314, 13557, 13158, 12300, 11471, 13494, 13857, 12381,
11205, 13914, 13757, 12015, 13610, 14292, 12157, 13863, 13678, 11120, 13530, 13532,
11124, 12641, 11083, 11218, 13668, 13742, 11518, 13741, 13333, 12249, 11009, 11510,
12086, 12674, 11599, 13031, 11096, 11948, 11093, 11947, 11556, 11346, 11224, 13207,
12444, 11506, 11504, 12036, 11587, 12237, 12335, 12130, 11425, 12238, 14020, 12621,
13517, 11753, 12185, 11006, 11069, 11141, 12744, 11064, 11148, 11734, 11863, 12225,
12341, 12346, 12198, 11526, 11523, 13812, 11480, 11928, 12114, 12118, 11246, 12752,
12296, 12212, 14006, 11498, 11043, 12555, 12667, 13822, 12603, 11396, 11257, 13701,
11398, 13274, 11653, 11843, 11969]
fam_quad = [13188, 14011, 11964, 13048, 11491, 13793, 11190, 13890, 13835, 12810, 12390, 13169, 12905, 11569, 11629, 11469, 12106, 11773, 13447, 12161, 13116, 11013, 11872, 11172, 11711, 11715, 12011, 14201, 12741, 11390, 11959, 13926, 13335, 11942, 13815, 12373, 12285, 13593, 12703, 11029, 11659, 11472, 11459, 11610, 11788, 13606, 11229, 13346, 11452, 11479, 11722, 13629, 12152, 12153, 12630, 12578, 11696, 12304, 13533, 12358, 12233, 11691]
len(fam_trio)
df_train.count()
df_train = df_train[~df_train.family_id.isin(fam_trio)]
df_train.shape
df_train.head()
df_train.isnull().sum()
col_to_keep = [u'QUAL', u'info_BaseQRankSum', u'info_ClippingRankSum', u'info_DP', u'info_FS', u'info_GC', u'info_HRun',
u'info_MQ', u'info_MQ0', u'info_MQRankSum', u'info_QD',
u'info_ReadPosRankSum', u'info_SOR', u'format_father_ref_AD', u'format_father_alt_AD', u'format_father_DP',
u'format_father_GQ', u'format_father_0_PL', u'format_father_1_PL',
u'format_father_2_PL',u'format_mother_ref_AD', u'format_mother_alt_AD', u'format_mother_DP',
u'format_mother_GQ', u'format_mother_0_PL', u'format_mother_1_PL', u'format_mother_2_PL',
u'format_offspring_ref_AD', u'format_offspring_alt_AD', u'format_offspring_DP', u'format_offspring_GQ',
u'format_offspring_0_PL', u'format_offspring_1_PL', u'format_offspring_2_PL']
col_to_keep_rerun = [u'QUAL', u'info_BaseQRankSum', u'info_ClippingRankSum', u'info_DP', u'info_FS',
u'info_MQ', u'info_MQ0', u'info_MQRankSum', u'info_QD',
u'info_ReadPosRankSum', u'format_father_ref_AD', u'format_father_alt_AD', u'format_father_DP',
u'format_father_GQ', u'format_father_0_PL', u'format_father_1_PL',
u'format_father_2_PL',u'format_mother_ref_AD', u'format_mother_alt_AD', u'format_mother_DP',
u'format_mother_GQ', u'format_mother_0_PL', u'format_mother_1_PL', u'format_mother_2_PL',
u'format_offspring_ref_AD', u'format_offspring_alt_AD', u'format_offspring_DP', u'format_offspring_GQ',
u'format_offspring_0_PL', u'format_offspring_1_PL', u'format_offspring_2_PL']
len(col_to_keep_rerun)
len(col_to_keep)
len(feature_cols)
def df2sklearn(mydf, col_to_keep):
if 'status' in mydf.columns:
mydf['status01'] = 1
mydf['status01'][mydf['status'] == 'N'] = 0
col_to_keep += ['status01']
col_to_keep = list(set(col_to_keep).intersection(set(mydf.columns)))
print col_to_keep
#res = mydf[col_to_keep]
mydf[col_to_keep] = mydf[col_to_keep].astype(float)
mydf = mydf.dropna(subset = col_to_keep)
    return mydf[col_to_keep]
df_train_num = func.df2sklearn(df_train,col_to_keep_rerun)
df_train_num.isnull().sum()
df_train_num.head()
df_train_num.dtypes
df_train_num.status01.value_counts()
df_neg.isnull().sum()
df_neg_num = func.df2sklearn(df_neg, col_to_keep_rerun)
df_neg_num.isnull().sum()
df_neg_num.head()
df_neg_num.dtypes
df_neg_num.status01.value_counts()
df_train_set_num = pd.concat([df_train_num, df_neg_num])
df_train_set_num.head()
df_train_set_num.dtypes
df_train_set_num.shape
df_train_set_num.status01.value_counts()
df_train_set_num.isnull().sum()
df_train_set_num = df_train_set_num.dropna()
df_train_set_num.isnull().sum()
df_train_set_num.shape
df_train_set_num.describe()
#df_train_set_num.to_csv("ssc_snp_training_set_no187.csv", sep="\t", header=True, index=False)
df_train_set_num = pd.read_table("ssc_snp_training_set_no187.csv", sep="\t")
df_train_set_num.dtypes
#### below is for my rerun
df_train_set_num = df_train_set_num[col_to_keep_rerun]
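# Helper: add allele-balance features, alt_AD / (alt_AD + ref_AD), for the
# offspring, father and mother, dropping rows where they cannot be computed.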
def addAlleleBalance(mydf):
mydf['offspring_allele_balance'] = mydf['format_offspring_alt_AD']/(mydf['format_offspring_alt_AD'] + mydf['format_offspring_ref_AD'])
mydf['father_allele_balance'] = mydf['format_father_alt_AD']/(mydf['format_father_alt_AD'] + mydf['format_father_ref_AD'])
mydf['mother_allele_balance'] = mydf['format_mother_alt_AD']/(mydf['format_mother_alt_AD'] + mydf['format_mother_ref_AD'])
mydf = mydf.dropna(subset = ['offspring_allele_balance', 'father_allele_balance', 'mother_allele_balance'])
return mydf
df_train_set_num = addAlleleBalance(df_train_set_num)
#df_train_set_num.to_csv("ssc_snp_training_set.csv", sep="\t", header=True, index=False)
feature_cols = [x for x in df_train_set_num.columns if x not in 'status01']
print feature_cols
response_col = 'status01'
##add allel ballance
### sklearn
#from sklearn.datasets import make_hastie_10_2
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor
# generate synthetic data from ESLII - Example 10.2
#X, y = make_hastie_10_2(n_samples=5000)
#X_train, X_test, y_train, y_test = train_test_split(X, y)
#
# fit estimator
est = GradientBoostingClassifier(n_estimators=2000, max_depth=1, learning_rate=.01)
est_regr = GradientBoostingRegressor(n_estimators=2000, max_depth=1, learning_rate=.01)
len(feature_cols)
X_train = df_train_set_num[feature_cols].values
y_train = df_train_set_num[response_col].values
X_tr, X_te, y_tr, y_te = train_test_split(X_train, y_train)
X_tr
est.fit(X_tr, y_tr)
est_regr.fit(X_tr, y_tr)
feature_importance = pd.DataFrame({'contrib': est.feature_importances_ ,'name': feature_cols})
feature_importance = pd.DataFrame({'contrib': est_regr.feature_importances_ ,'name': feature_cols})
feature_importance.sort(['contrib'], ascending=[False], inplace=True)
feature_importance
#feature_importance.to_excel('feature_contrib.xls', index=False)
est.loss_
sum(est.feature_importances_)
# predict class labels
pred = est.predict(X_te)
pred_regr = est_regr.predict(X_te)
len(pred)
pred_ser = pd.Series(pred)
pred_ser.describe()
pred_ser.value_counts()
sum(y_te == 1)
sum(pred_regr > .17)
sum(pred[pred_regr > .17] == 1)
# score on test data (accuracy)
acc = est.score(X_te, y_te)
print('ACC: %.4f' % acc)
# predict class probabilities
est.predict_proba(X_te)[0]
df = None
df_list = []
len(fam_trio)
#for f in fam_trio:
#for f in fam_quad:
for f in fam_trio + fam_quad:
memb = 'p1'
print f
# if str(f) in files_missing:
# continue
fv = reload(features_vcf)
train_set = "/mnt/scratch/asalomatov/data/SSC/SSCdeNovoCalls/ssc_exome_all_snp.txt"
ftrs = fv.FeaturesVcf(myped, train_set)
#filepath ='/mnt/scratch/asalomatov/data/SSC/vcf/raw/' + str(f) +'.family.vqsr.sorted.vcf.gz'
#filepath ='/mnt/ceph/asalomatov/SSC_Eichler/rerun/ssc' + str(f) + '/' + str(f) +'-JHC-vars.vcf.gz'
filepath = '/mnt/scratch/asalomatov/SSC_rerun/denovo_analysis/rerun200fam/'+ str(f) + '-JHC-pm50.vcf.gz'
print filepath
if not os.path.isfile(filepath):
continue
df = ftrs._fileHasVariant(filepath, fam_id=f, ind_id=str(f)+'.'+memb, chrom=None, pos_start=None, pos_end=None)
if df is None:
continue
df_num = func.df2sklearn(df, col_to_keep_rerun)
df_num = addAlleleBalance(df_num)
df_num['score'] = est.predict(df_num[feature_cols].values)
# print 'df_num dim :', df_num.shape
# df_num = df_num[df_num['score'] > .00]
# print 'df_num dim :', df_num.shape
df_list.append(df_num)
len(df_list)
len(df_list_krumm)
type(df)
#df_list_krumm = df_list
df_re = pd.concat(df_list)
df_re.shape
df_re.head()
df_re.score.value_counts()
ftrs.variants.head()
ftrs.variants.shape
c1 = ftrs.variants.ind_id.isin(df_re.ind_id)
sum(c1)
df_kn = ftrs.variants[c1]
df_kn.shape
df_re['var'] = df_re['ind_id'].map(str)+ '_' + df_re['CHROM'].map(str) + '_' + df_re.POS.map(str) + '_' + df_re.REF.map(str) + '_' + df_re.ALT.map(str)
ftrs.variants['var'] = ftrs.variants['ind_id'].map(str)+ '_' + ftrs.variants['chr'].map(str) + '_' + ftrs.variants.pos.map(str) + '_' + ftrs.variants.ref.map(str) + '_' + ftrs.variants.alt.map(str)
df_re['pos_var'] = df_re['CHROM'].map(str) + '_' + df_re.POS.map(str) + '_' + df_re.REF.map(str) + '_' + df_re.ALT.map(str)
ftrs.variants['var'] = ftrs.variants['ind_id'].map(str)+ '_' + ftrs.variants['chr'].map(str) + '_' + ftrs.variants.pos.map(str) + '_' + ftrs.variants.ref.map(str) + '_' + ftrs.variants.alt.map(str)
df_re.groupby('pos_var').apply(sum)
df_re[df_re.score > .75].groupby('vartype').apply(varsumm)
#how many unique variants?
df_re.vartype.value_counts()
df_re.pheno.value_counts()
len(set(df_re['CHROM'].map(str) + '_' + df_re.POS.map(str) + '_' + df_re.REF.map(str) + '_' + df_re.ALT.map(str)))
c1 = (df_re['dbsnp'] == 'no') & (df_re['vartype'] == 'snp')
len(set((df_re['CHROM'].map(str) + '_' + df_re.POS.map(str) + '_' + df_re.REF.map(str) + '_' +
df_re.ALT.map(str))[c1]))
###apply hard filter(Ash)
cAsh_snp_1 = df_re['QUAL'] >= 30
cAsh_snp_2 = df_re['info_FS'] < 25
cAsh_snp_3 = df_re['info_QD'] >= 4
cAsh_indel_1 = df_re['info_QD'] >= 1
df_re['status'][cAsh_snp_1 & cAsh_snp_2 & cAsh_snp_3].value_counts()
df_re['dbsnp'][cAsh_snp_1 & cAsh_snp_2 & cAsh_snp_3].value_counts()
df_re['dbsnp'].value_counts()
df_re = pd.merge(df_re, df_kn[['var', 'status', 'descr']], how='left', on='var')
df_re.head()
df_re['status'][df_re.status.isnull()] = 'extra'
df_re['dbsnp'] = 'yes'
df_re['dbsnp'][df_re.ID.isnull()] = 'no'
df_re.ID.isnull().sum()
df_re.dbsnp.value_counts()
df_re.dbsnp.value_counts()
df_re.status.value_counts()
df_re[df_re.score > .75].status.value_counts()
df_re[df_re.score > .75].status.value_counts()
df_re[~df_re.status.isin(['extra'])].dbsnp.value_counts()
df_re.groupby('vartype').apply(varsumm)
df_re[df_re.score > .75].groupby('vartype').apply(varsumm)
df_re[df_re.score > .9].groupby('vartype').apply(varsumm)
df_re[df_re.score > .95].groupby('vartype').apply(varsumm)
df_re[df_re.score > .99].groupby('vartype').apply(varsumm)
df_re[df_re.score > .999].groupby('vartype').apply(varsumm)
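# Summary helper used with groupby('vartype') above: per group it counts
# validated ('Y'), not-determined ('ND') and failed ('N') calls, plus 'extra'
# calls absent from dbSNP.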
def varsumm(x):
validated = sum(x['status'] == 'Y')
not_determined = sum(x['status'] == 'ND')
failed = sum(x['status'] == 'N')
extra = sum(x['dbsnp'] == 'no')
return pd.Series ([validated, not_determined, failed, extra], index = ['validated', 'not_determined', 'failed', 'extra'])
df.head()
df.columns
df.shape
df.isnull().sum()
set(df.columns).difference(set(col_to_keep))
set(col_to_keep).difference(set(df.columns))
df_num = df2sklearn(df, col_to_keep_rerun)
df_num.head()
df_num.dtypes
df_num[col_to_keep_rerun].dtypes
df_num = addAlleleBalance(df_num)
df_num = df_num.dropna()
df_num.shape
df.offspring_gt_type.value_counts()
df_num.isnull().sum()
set(df_num.columns).difference(set(df_train_set_num.columns))
set(df_train_set_num.columns).difference(set(df_num.columns))
pred_test = est.predict(df_num[feature_cols].values)
len(pred_test)
df_num['score'] = pred_test
len(pred_test)
pred_test[:100]
pred_test_ser = pd.Series(pred_test)
pred_test_ser.describe()
ftrs.variants[ftrs.variants.ind_id.isin([f])]
| mit |
andrewgiessel/folium | examples/choropleth_states.py | 12 | 1111 | '''
Choropleth map of US states
'''
import folium
import pandas as pd
state_geo = r'us-states.json'
state_unemployment = r'US_Unemployment_Oct2012.csv'
state_data = pd.read_csv(state_unemployment)
#Let Folium determine the scale
states = folium.Map(location=[48, -102], zoom_start=3)
states.geo_json(geo_path=state_geo, data=state_data,
columns=['State', 'Unemployment'],
key_on='feature.id',
fill_color='YlGn', fill_opacity=0.7, line_opacity=0.2,
legend_name='Unemployment Rate (%)')
states.create_map(path='us_state_map.html')
#Let's define our own scale and change the line opacity
states2 = folium.Map(location=[48, -102], zoom_start=3)
states2.geo_json(geo_path=state_geo, data=state_data,
columns=['State', 'Unemployment'],
threshold_scale=[5, 6, 7, 8, 9, 10],
key_on='feature.id',
fill_color='BuPu', fill_opacity=0.7, line_opacity=0.5,
legend_name='Unemployment Rate (%)',
reset=True)
states2.create_map(path='us_state_map_2.html')
| mit |
aman-iitj/scipy | scipy/signal/spectral.py | 2 | 34720 | """Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import lombscargle
import warnings
from scipy._lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
'spectrogram']
def periodogram(x, fs=1.0, window=None, nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is an array it will be used
directly as the window. Defaults to None; equivalent to 'boxcar'.
nfft : int, optional
Length of the FFT used. If None the length of `x` will be used.
detrend : str or function or False, optional
Specifies how to detrend `x` prior to computing the spectrum. If
`detrend` is a string, it is passed as the ``type`` argument to
`detrend`. If it is a function, it should return a detrended array.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[s]
nperseg = nfft
nfft = None
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
def welch(x, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral density
by dividing the data into overlapping segments, computing a modified
periodogram for each segment and averaging the periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
overlap of 50% is a reasonable trade off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
freqs, Pxx = csd(x, x, fs, window, nperseg, noverlap, nfft, detrend,
return_onesided, scaling, axis)
return freqs, Pxx.real
def csd(x, y, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate the cross power spectral density, Pxy, using Welch's method.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap: int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the CSD is computed for both inputs; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxy : ndarray
Cross spectral density or cross power spectrum of x,y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method. [Equivalent to csd(x,x)]
coherence: Magnitude squared coherence by Welch's method.
Notes
--------
By convention, Pxy is computed with the conjugate FFT of X multiplied by
the FFT of Y.
If the input series differ in length, the shorter series will be
zero-padded to match.
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
    overlap of 50% is a reasonable trade off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the magnitude of the cross spectral density.
>>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, np.abs(Pxy))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('CSD [V**2/Hz]')
>>> plt.show()
"""
freqs, Pxy, _ = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
detrend, return_onesided, scaling, axis,
mode='psd')
# Average over windows.
if len(Pxy.shape) >= 2 and Pxy.size > 0:
if Pxy.shape[-1] > 1:
Pxy = Pxy.mean(axis=-1)
else:
Pxy = np.reshape(Pxy, Pxy.shape[:-1])
return freqs, Pxy
def spectrogram(x, fs=1.0, window=('tukey',.25), nperseg=256, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1):
"""
Compute a spectrogram with consecutive Fourier transforms.
Spectrograms can be used as a way of visualizing the change of a
nonstationary signal's frequency content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to a Tukey window with shape parameter of 0.25.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 8``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the spectrogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Sxx : ndarray
Spectrogram of x. By default, the last axis of Sxx corresponds to the
segment times.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. In contrast to welch's method, where the entire
data stream is averaged over, one may wish to use a smaller overlap (or
perhaps none at all) when computing a spectrogram, to maintain some
statistical independence between individual segments.
.. versionadded:: 0.16.0
References
----------
    .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck "Discrete-Time
Signal Processing", Prentice Hall, 1999.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency linearly changes
with time from 1kHz to 2kHz, corrupted by 0.001 V**2/Hz of white noise
sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> freq = np.linspace(1e3, 2e3, N)
>>> x = amp * np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the spectrogram.
>>> f, t, Sxx = signal.spectrogram(x, fs)
>>> plt.pcolormesh(t, f, Sxx)
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
    # Less overlap than welch, so samples are more statistically independent
if noverlap is None:
noverlap = nperseg // 8
freqs, Pxy, time = _spectral_helper(x, x, fs, window, nperseg, noverlap,
nfft, detrend, return_onesided, scaling,
axis, mode='psd')
return freqs, time, Pxy
def coherence(x, y, fs=1.0, window='hanning', nperseg=256, noverlap=None,
nfft=None, detrend='constant', axis=-1):
"""
Estimate the magnitude squared coherence estimate, Cxy, of discrete-time
signals X and Y using Welch's method.
Cxy = abs(Pxy)**2/(Pxx*Pyy), where Pxx and Pyy are power spectral density
estimates of X and Y, and Pxy is the cross spectral density estimate of X
and Y.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap: int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
axis : int, optional
Axis along which the coherence is computed for both inputs; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Cxy : ndarray
Magnitude squared coherence of x and y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
--------
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
    overlap of 50% is a reasonable trade off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of Signals"
Prentice Hall, 2005
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the coherence.
>>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, Cxy)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Coherence')
>>> plt.show()
"""
freqs, Pxx = welch(x, fs, window, nperseg, noverlap, nfft, detrend,
axis=axis)
_, Pyy = welch(y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
_, Pxy = csd(x, y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
Cxy = np.abs(Pxy)**2 / Pxx / Pyy
return freqs, Cxy
def _spectral_helper(x, y, fs=1.0, window='hanning', nperseg=256,
noverlap=None, nfft=None, detrend='constant',
return_onesided=True, scaling='spectrum', axis=-1,
mode='psd'):
'''
Calculate various forms of windowed FFTs for PSD, CSD, etc.
This is a helper function that implements the commonality between the
psd, csd, and spectrogram functions. It is not designed to be called
externally. The windows are not averaged over; the result from each window
is returned.
Parameters
    ----------
x : array_like
Array or sequence containing the data to be analyzed.
y : array_like
Array or sequence containing the data to be analyzed. If this is
        the same object in memory as x (i.e. _spectral_helper(x, x, ...)),
the extra computations are spared.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are ['psd',
'complex', 'magnitude', 'angle', 'phase'].
Returns
-------
freqs : ndarray
Array of sample frequencies.
result : ndarray
        Array of output data, contents dependent on *mode* kwarg.
t : ndarray
Array of times corresponding to each data segment
References
----------
stackoverflow: Rolling window for 1D arrays in Numpy?
<http://stackoverflow.com/a/6811241>
stackoverflow: Using strides for an efficient moving average filter
<http://stackoverflow.com/a/4947453>
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
'''
if mode not in ['psd', 'complex', 'magnitude', 'angle', 'phase']:
        raise ValueError("Unknown value for mode %s, must be one of: "
                         "'psd', 'complex', 'magnitude', 'angle', 'phase'"
                         % mode)
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is not 'psd'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x,y,np.complex64)
else:
outdtype = np.result_type(x,np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except ValueError:
raise ValueError('x and y cannot be broadcast together.')
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.rollaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data and y.ndim > 1:
y = np.rollaxis(y, axis, len(y.shape))
    # Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
# X and Y are same length now, can test nperseg with either
if x.shape[-1] < nperseg:
        warnings.warn('nperseg = {0:d} is greater than input length = {1:d}, '
'using nperseg = {1:d}'.format(nperseg, x.shape[-1]))
nperseg = x.shape[-1]
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
elif noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
else:
noverlap = int(noverlap)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return signaltools.detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.rollaxis(d, -1, axis)
d = detrend(d)
return np.rollaxis(d, axis, len(d.shape))
else:
detrend_func = detrend
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
if np.result_type(win,np.complex64) != outdtype:
win = win.astype(outdtype)
if mode == 'psd':
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
else:
scale = 1
if return_onesided is True:
if np.iscomplexobj(x):
sides = 'twosided'
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
else:
sides = 'twosided'
if sides == 'twosided':
num_freqs = nfft
elif sides == 'onesided':
if nfft % 2:
num_freqs = (nfft + 1)//2
else:
num_freqs = nfft//2 + 1
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft)
result = result[..., :num_freqs]
freqs = fftpack.fftfreq(nfft, 1/fs)[:num_freqs]
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft)
result_y = result_y[..., :num_freqs]
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
elif mode == 'magnitude':
result = np.absolute(result)
elif mode == 'angle' or mode == 'phase':
result = np.angle(result)
elif mode == 'complex':
pass
result *= scale
if sides == 'onesided':
if nfft % 2:
result[...,1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[...,1:-1] *= 2
    t = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1, nperseg - noverlap)/float(fs)
if sides != 'twosided' and not nfft % 2:
# get the last value correctly, it is negative otherwise
freqs[-1] *= -1
# we unwrap the phase here to handle the onesided vs. twosided case
if mode == 'phase':
result = np.unwrap(result, axis=-1)
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data:
result = result.real
# Output is going to have new last axis for window index
if axis != -1:
# Specify as positive axis index
if axis < 0:
axis = len(result.shape)-1-axis
# Roll frequency axis back to axis where the data came from
result = np.rollaxis(result, -1, axis)
else:
# Make sure window/time index is last axis
result = np.rollaxis(result, -1, -2)
return freqs, result, t
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft):
'''
Calculate windowed FFT, for internal use by scipy.signal._spectral_helper
This is a helper function that does the main FFT calculation for
    _spectral_helper. All input validation is performed there, and the data
axis is assumed to be the last axis of x. It is not designed to be called
externally. The windows are not averaged over; the result from each window
is returned.
Returns
-------
result : ndarray
Array of FFT data
References
----------
stackoverflow: Repeat NumPy array without replicating data?
<http://stackoverflow.com/a/5568169>
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
'''
    # Create strided array of data segments
if nperseg == 1 and noverlap == 0:
result = x[..., np.newaxis]
else:
step = nperseg - noverlap
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
# Detrend each data segment individually
result = detrend_func(result)
# Apply window by multiplication
result = win * result
# Perform the fft. Acts on last axis by default. Zero-pads automatically
result = fftpack.fft(result, n=nfft)
return result
| bsd-3-clause |
etkirsch/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
drammock/mne-python | examples/connectivity/mixed_source_space_connectivity.py | 6 | 7054 | """
===============================================================================
Compute mixed source space connectivity and visualize it using a circular graph
===============================================================================
This example computes the all-to-all connectivity between 75 regions in a
mixed source space based on dSPM inverse solutions and a FreeSurfer cortical
parcellation. The connectivity is visualized using a circular graph which
is ordered based on the locations of the regions in the axial plane.
"""
# Author: Annalisa Pascarella <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import mne
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne import setup_volume_source_space, setup_source_space
from mne import make_forward_solution
from mne.io import read_raw_fif
from mne.minimum_norm import make_inverse_operator, apply_inverse_epochs
from mne.connectivity import spectral_connectivity
from mne.viz import circular_layout, plot_connectivity_circle
# Set directories
data_path = sample.data_path()
subject = 'sample'
data_dir = op.join(data_path, 'MEG', subject)
subjects_dir = op.join(data_path, 'subjects')
bem_dir = op.join(subjects_dir, subject, 'bem')
# Set file names
fname_aseg = op.join(subjects_dir, subject, 'mri', 'aseg.mgz')
fname_model = op.join(bem_dir, '%s-5120-bem.fif' % subject)
fname_bem = op.join(bem_dir, '%s-5120-bem-sol.fif' % subject)
fname_raw = data_dir + '/sample_audvis_filt-0-40_raw.fif'
fname_trans = data_dir + '/sample_audvis_raw-trans.fif'
fname_cov = data_dir + '/ernoise-cov.fif'
fname_event = data_dir + '/sample_audvis_filt-0-40_raw-eve.fif'
# List of sub-structures we are interested in. We select only the
# sub-structures we want to include in the source space.
labels_vol = ['Left-Amygdala',
'Left-Thalamus-Proper',
'Left-Cerebellum-Cortex',
'Brain-Stem',
'Right-Amygdala',
'Right-Thalamus-Proper',
'Right-Cerebellum-Cortex']
# Setup a surface-based source space, oct5 is not very dense (just used
# to speed up this example; we recommend oct6 in actual analyses)
src = setup_source_space(subject, subjects_dir=subjects_dir,
spacing='oct5', add_dist=False)
# Setup a volume source space
# set pos=10.0 for speed, not very accurate; we recommend something smaller
# like 5.0 in actual analyses:
vol_src = setup_volume_source_space(
subject, mri=fname_aseg, pos=10.0, bem=fname_model,
add_interpolator=False, # just for speed, usually use True
volume_label=labels_vol, subjects_dir=subjects_dir)
# Generate the mixed source space
src += vol_src
# Load data
raw = read_raw_fif(fname_raw)
raw.pick_types(meg=True, eeg=False, eog=True, stim=True).load_data()
events = mne.find_events(raw)
noise_cov = mne.read_cov(fname_cov)
# compute the fwd matrix
fwd = make_forward_solution(raw.info, fname_trans, src, fname_bem,
mindist=5.0) # ignore sources<=5mm from innerskull
del src
# Define epochs for left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
reject = dict(mag=4e-12, grad=4000e-13, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
reject=reject, preload=False)
del raw
# Compute the inverse solution for each epoch
snr = 1.0 # use smaller SNR for raw data
inv_method = 'dSPM'
parc = 'aparc' # the parcellation to use, e.g., 'aparc' 'aparc.a2009s'
lambda2 = 1.0 / snr ** 2
# Compute inverse operator
inverse_operator = make_inverse_operator(
epochs.info, fwd, noise_cov, depth=None, fixed=False)
del fwd
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, inv_method,
pick_ori=None, return_generator=True)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels_parc = mne.read_labels_from_annot(subject, parc=parc,
subjects_dir=subjects_dir)
# Average the source estimates within each label of the cortical parcellation
# and each sub-structure contained in the source space.
# When mode = 'mean_flip', this option is used only for the cortical labels.
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(
stcs, labels_parc, src, mode='mean_flip', allow_empty=True,
return_generator=True)
# We compute the connectivity in the alpha band and plot it using a circular
# graph layout
fmin = 8.
fmax = 13.
sfreq = epochs.info['sfreq'] # the sampling frequency
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
label_ts, method='pli', mode='multitaper', sfreq=sfreq, fmin=fmin,
fmax=fmax, faverage=True, mt_adaptive=True, n_jobs=1)
# We create a list of Label containing also the sub structures
labels_aseg = mne.get_volume_labels_from_src(src, subject, subjects_dir)
labels = labels_parc + labels_aseg
# read colors
node_colors = [label.color for label in labels]
# We reorder the labels based on their location in the left hemi
label_names = [label.name for label in labels]
lh_labels = [name for name in label_names if name.endswith('lh')]
rh_labels = [name for name in label_names if name.endswith('rh')]
# Get the y-location of the label
label_ypos_lh = list()
for name in lh_labels:
idx = label_names.index(name)
ypos = np.mean(labels[idx].pos[:, 1])
label_ypos_lh.append(ypos)
try:
idx = label_names.index('Brain-Stem')
except ValueError:
pass
else:
ypos = np.mean(labels[idx].pos[:, 1])
lh_labels.append('Brain-Stem')
label_ypos_lh.append(ypos)
# Reorder the labels based on their location
lh_labels = [label for (yp, label) in sorted(zip(label_ypos_lh, lh_labels))]
# For the right hemi
rh_labels = [label[:-2] + 'rh' for label in lh_labels
if label != 'Brain-Stem' and label[:-2] + 'rh' in rh_labels]
# Save the plot order
node_order = lh_labels[::-1] + rh_labels
node_angles = circular_layout(label_names, node_order, start_pos=90,
group_boundaries=[0, len(label_names) // 2])
# Plot the graph using node colors from the FreeSurfer parcellation. We only
# show the 300 strongest connections.
conmat = con[:, :, 0]
fig = plt.figure(num=None, figsize=(8, 8), facecolor='black')
plot_connectivity_circle(conmat, label_names, n_lines=300,
node_angles=node_angles, node_colors=node_colors,
title='All-to-All Connectivity left-Auditory '
'Condition (PLI)', fig=fig)
###############################################################################
# Save the figure (optional)
# --------------------------
#
# By default matplotlib does not save using the facecolor, even though this was
# set when the figure was generated. If not set via savefig, the labels, title,
# and legend will be cut off from the output png file::
#
# >>> fname_fig = data_path + '/MEG/sample/plot_mixed_connect.png'
# >>> plt.savefig(fname_fig, facecolor='black')
| bsd-3-clause |
smartscheduling/scikit-learn-categorical-tree | examples/svm/plot_rbf_parameters.py | 35 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameter can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model the freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the number of steps in ``C_range`` and
``gamma_range`` will increase the resolution of the hyper-parameter heat
map.
'''
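# As a quick numerical intuition for ``gamma`` (a sketch, not part of the
# grid search below): the RBF kernel is K(x, x') = exp(-gamma * ||x - x'||^2),
# so at a distance of 1/sqrt(gamma) from a support vector its influence has
# already decayed to exp(-1) ~ 0.37. Larger gamma therefore means a smaller
# radius of influence.
#
# >>> import numpy as np
# >>> gamma = 10.0
# >>> d = 1.0 / np.sqrt(gamma)      # distance at which influence ~ exp(-1)
# >>> np.exp(-gamma * d ** 2)
# 0.36787944117144233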
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
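# A minimal sketch of what MidpointNormalize does (the numbers are
# illustrative): vmin maps to 0.0, midpoint to 0.5 and vmax to 1.0, so most
# of the colormap's dynamic range is spent around the midpoint.
#
# >>> norm = MidpointNormalize(vmin=0.2, vmax=1.0, midpoint=0.92)
# >>> norm(np.array([0.2, 0.6, 0.92, 1.0]))
# -> approximately [0.0, 0.28, 0.5, 1.0]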
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes, to
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The scores are encoded as colors with the hot colormap, which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
carrillo/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
        Notes
        -----
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
        # Create the graph of shortest distances from X to self.training_data_
        # via the nearest neighbors of X.
        # This can be done as a single array operation, but it potentially
        # takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
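        # For reference, the single array operation mentioned above would be
        # the equivalent (but memory-hungry) sketch:
        # G_X = np.min(self.dist_matrix_[indices] + distances[:, :, None],
        #              axis=1)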
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
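# A minimal usage sketch (illustrative only; the data is random and the
# parameter values are arbitrary):
#
# >>> import numpy as np
# >>> X = np.random.RandomState(0).rand(50, 10)
# >>> embedding = Isomap(n_neighbors=10, n_components=2).fit_transform(X)
# >>> embedding.shape
# (50, 2)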
| bsd-3-clause |
infilect/ml-course1 | week2/mnist_siamese/visualize.py | 2 | 1548 | from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from matplotlib import offsetbox
def visualize(embed, x_test, img_name):
# two ways of visualization: scale to fit [0,1] scale
# feat = embed - np.min(embed, 0)
# feat /= np.max(feat, 0)
# two ways of visualization: leave with original scale
feat = embed
ax_min = np.min(embed,0)
ax_max = np.max(embed,0)
ax_dist_sq = np.sum((ax_max-ax_min)**2)
fig = plt.figure()
ax = plt.subplot(111)
shown_images = np.array([[1., 1.]])
for i in range(feat.shape[0]):
dist = np.sum((feat[i] - shown_images)**2, 1)
if np.min(dist) < 3e-4*ax_dist_sq: # don't show points that are too close
continue
shown_images = np.r_[shown_images, [feat[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(x_test[i], zoom=0.6, cmap=plt.cm.gray_r),
xy=feat[i], frameon=False
)
ax.add_artist(imagebox)
plt.axis([ax_min[0], ax_max[0], ax_min[1], ax_max[1]])
# plt.xticks([]), plt.yticks([])
plt.title('Embedding from the last layer of the network')
#plt.show()
fig.savefig(img_name)
if __name__ == "__main__":
mnist = input_data.read_data_sets('MNIST_data', one_hot=False)
x_test = mnist.test.images
x_test = x_test.reshape([-1, 28, 28])
embed = np.fromfile('embed.txt', dtype=np.float32)
embed = embed.reshape([-1, 2])
    # visualize() needs an output file name; 'embed.png' here is just an
    # illustrative choice.
    visualize(embed, x_test, 'embed.png')
| mit |
RPGroup-PBoC/gist_pboc_2017 | code/project_pt3_functions_and_iteration.py | 1 | 8915 | # Import the necessary modules.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Image processing utilities.
import skimage.io
import skimage.measure
import skimage.filters
import skimage.segmentation
# For file management.
import glob
# In this script, we'll write all of the image processing steps we've executed
# so far as two functions. We would like to split these functions up in to two
# parts.
# 1. Performing the segmentation. This should take an image, a threshold
# value, and some bounds for the area filtering. This will return the
# labeled segmentation mask.
# 2. Extracting the fluorescence intensities of each object. This should take
# a segmentation mask and a fluorescence image. This will return an array of
# the mean fluorescence for each cell.
# Why would we want to write this as a function? We'll be doing this same
# procedure many times over each image in our sample. We'll have more than 100
# images, so it would be best to have this be modular to save our fingers some
# work. To verify that our functions work, we'll make sure that we get the
# exact same result when we type each command out by hand on a single image.
# To start, let's retype what we've done so far. We'll generate a final
# segmentation mask and a list of cell intensities.
phase_image = skimage.io.imread('data/lacI_titration/O2_delta_phase_pos_16.tif')
# Normalize the image and perform a background subtraction.
im_float = (phase_image - phase_image.min()) / (phase_image.max() - phase_image.min())
im_blur = skimage.filters.gaussian(im_float, sigma=50.0)
im_sub = im_float - im_blur
# Apply the threshold and the area filter.
im_thresh = im_sub < -0.2
im_label = skimage.measure.label(im_thresh)
props = skimage.measure.regionprops(im_label)
ip_dist = 0.160 # in units of microns per pixel.
approved_objects = np.zeros_like(im_label)
for prop in props:
area = prop.area * ip_dist**2
if (area > 0.5) & (area < 6):
approved_objects += im_label==prop.label
# Clear the border and relabel.
im_border = skimage.segmentation.clear_border(approved_objects > 0)
im_relab = skimage.measure.label(im_border)
# Show the final segmentation mask.
plt.figure()
plt.imshow(im_relab, cmap=plt.cm.spectral_r)
plt.show()
# Load the fluorescence image and compute the cell intensities.
fluo_im = skimage.io.imread('data/lacI_titration/O2_delta_yfp_pos_16.tif')
props = skimage.measure.regionprops(im_relab, intensity_image=fluo_im)
cell_intensities = []
for prop in props:
cell_intensities.append(prop.mean_intensity)
# Print the mean cell intensities to the screen.
mean_int = np.mean(cell_intensities)
print("The mean cell intensity coded by hand is " + str(mean_int) + " counts.")
# This looks like what we've done in the past three project parts. Let's try
# writing these as functions. We'll write this functions such that they take
# only one or two arguments with the other parameters as keyword arguments.
# We'll start with the segmentation function.
def phase_segmentation(image, threshold, area_bounds=[0.5, 6.0],
ip_dist=0.160):
"""
    Segment a phase image and return the mask.
Parameters
----------
image : 2d-array
The phase image to be segmented. This image will be converted to a
float type.
threshold : float
Threshold value for the segmentation. This function will select objects
below this threshold value.
area_bounds : list, default=[0.5, 6.0]
        Area bounds for identified objects, given as a two-entry list
        [min, max] in units of square microns.
ip_dist : int or float, default = 0.160
Interpixel distance for the camera. This should be in units of microns
per pixel.
Returns
-------
final_seg : 2d-array
Final, labeled segmentation mask.
"""
# First is to convert the image to a float.
im_float = (image - image.min()) / (image.max() - image.min())
# Do a background subtraction.
im_blur = skimage.filters.gaussian(im_float, sigma=50.0)
im_sub = im_float - im_blur
# Apply the threshold.
im_thresh = im_sub < threshold # Note that we are using the provided arg
# Label the image and apply the area bounds.
im_lab = skimage.measure.label(im_thresh)
props = skimage.measure.regionprops(im_lab)
approved_objects = np.zeros_like(im_lab)
for prop in props:
area = prop.area * ip_dist**2
if (area > area_bounds[0]) & (area < area_bounds[1]):
approved_objects += im_lab == prop.label
# Clear the border and relabel.
im_border = skimage.segmentation.clear_border(approved_objects > 0)
final_seg = skimage.measure.label(im_border)
# Return the final segmentation mask
return final_seg
# Now let's try writing one to extract the mean intensities.
def extract_intensity(seg, fluo_im):
"""
Extracts the mean intensity of objects in a segmented image.
Parameters
----------
seg : 2d-array, int
Segmentation mask with labeled objects.
fluo_im : 2d-array, int
Fluorescence image to extract intensities from.
Returns
-------
cell_ints : 1d-array
Vector of mean cell intensities. This has a length the same as the
number of objects in the provided segmentation mask.
"""
# Get the region props of the fluorescence image using the segmentation
# mask.
props = skimage.measure.regionprops(seg, intensity_image=fluo_im)
cell_ints = []
for prop in props:
cell_ints.append(prop.mean_intensity)
# Convert the cell_ints to an array and return.
return np.array(cell_ints)
# Let's test these two functions out on our image and make sure that they
# return the same values that we got from our hand coded version.
seg_mask = phase_segmentation(phase_image, -0.2)
cell_ints = extract_intensity(seg_mask, fluo_im)
mean_int_func = np.mean(cell_ints)
print("The mean cell intensity from our function is " + str(mean_int_func) + " counts.")
# Finally, we can test that the segmentation masks are the same by checking
# if each pixel in the two masks is identical.
print((seg_mask == im_relab).all()) # Checks that all pixels are the same.
# Great! They all seem to work. Ultimately, we would like to iterate this over
# all of our images for each concentration and each channel. Let's try doing
# this with our functions and generate a distribution of the fluorescence
# intensity for all images in the O2 delta images. To get a list of all of
# the images in the directory, we will use the glob module.
phase_names = glob.glob('data/lacI_titration/O2_delta_phase*.tif')
fluo_names = glob.glob('data/lacI_titration/O2_delta_yfp*.tif')
# We'll make an empty vector to store all of our cell intensity vectors.
cell_intensities = []
# Now we just have to loop through each file.
for i in range(len(phase_names)):
# Load the two images.
phase_im = skimage.io.imread(phase_names[i])
fluo_im = skimage.io.imread(fluo_names[i])
# Perform the segmentation.
seg_mask = phase_segmentation(phase_im, -0.2)
# Extract the intensities and append them.
ints = extract_intensity(seg_mask, fluo_im)
for val in ints:
cell_intensities.append(val)
# Now we can plot the distribution!
print('Extracted ' + str(len(cell_intensities)) + ' intensity values.')
plt.figure()
plt.hist(cell_intensities, bins=100)
plt.xlabel('mean YFP cell intensity (a. u.)')
plt.ylabel('counts')
plt.title('constitutive expression')
plt.show()
# We see that we have quite a lot of cells. Their distribution has a range between
# 2000 and 6000, like we saw in the previous distribution from a single image.
# Let's look at another image set to see how well our functions work. Let's
# take a look at the fluorescence distribution for the O2 autofluorescent
# strain. Remember, these cells should be very dark compared to the delta
# strain.
phase_names_auto = glob.glob('data/lacI_titration/O2_auto_phase*.tif')
fluo_names_auto = glob.glob('data/lacI_titration/O2_auto_yfp*.tif')
auto_cell_intensities = []
for i in range(len(phase_names_auto)):
phase_im = skimage.io.imread(phase_names_auto[i])
fluo_im = skimage.io.imread(fluo_names_auto[i])
seg_mask = phase_segmentation(phase_im, -0.2)
ints = extract_intensity(seg_mask, fluo_im)
for val in ints:
auto_cell_intensities.append(val)
# Let's look at the distribution.
# Now we can plot the distribution!
print('Extracted ' + str(len(auto_cell_intensities)) + ' auto intensity values.')
plt.figure()
plt.hist(auto_cell_intensities, bins=100)
plt.xlabel('mean YFP cell intensity (a. u.)')
plt.ylabel('counts')
plt.title('autofluorescence')
plt.show()
# That's quite a difference!
# In our next script, we will iterate this over all of the images in our
# collection and test our theory for fold-change in gene expression for simple
# repression.
| mit |
FrankBian/kuma | vendor/packages/ipython/test/test_shell_options.py | 5 | 2964 | #!/usr/bin/env python
"""A few unit tests for the Shell module.
"""
from unittest import TestCase, main
from IPython import Shell
try:
import matplotlib
has_matplotlib = True
except ImportError:
has_matplotlib = False
class ShellTestBase(TestCase):
def _test(self,argv,ans):
shell = Shell._select_shell(argv)
err = 'Got %s != %s' % (shell,ans)
self.failUnlessEqual(shell,ans,err)
class ArgsTestCase(ShellTestBase):
def test_plain(self):
self._test([],Shell.IPShell)
def test_tkthread(self):
self._test(['-tkthread'],Shell.IPShell)
def test_gthread(self):
self._test(['-gthread'],Shell.IPShellGTK)
def test_qthread(self):
self._test(['-qthread'],Shell.IPShellQt)
def test_q4thread(self):
self._test(['-q4thread'],Shell.IPShellQt4)
def test_wthread(self):
self._test(['-wthread'],Shell.IPShellWX)
if has_matplotlib:
class MplArgsTestCase(ShellTestBase):
def setUp(self):
self.backend = matplotlib.rcParams['backend']
def tearDown(self):
matplotlib.rcParams['backend'] = self.backend
def _test(self,argv,ans):
shell = Shell._select_shell(argv)
err = 'Got %s != %s' % (shell,ans)
self.failUnlessEqual(shell,ans,err)
def test_tk(self):
matplotlib.rcParams['backend'] = 'TkAgg'
self._test(['-pylab'],Shell.IPShellMatplotlib)
def test_ps(self):
matplotlib.rcParams['backend'] = 'PS'
self._test(['-pylab'],Shell.IPShellMatplotlib)
def test_gtk(self):
matplotlib.rcParams['backend'] = 'GTKAgg'
self._test(['-pylab'],Shell.IPShellMatplotlibGTK)
def test_gtk_2(self):
self._test(['-gthread','-pylab'],Shell.IPShellMatplotlibGTK)
self.failUnlessEqual(matplotlib.rcParams['backend'],'GTKAgg')
def test_qt(self):
matplotlib.rcParams['backend'] = 'QtAgg'
self._test(['-pylab'],Shell.IPShellMatplotlibQt)
def test_qt_2(self):
self._test(['-qthread','-pylab'],Shell.IPShellMatplotlibQt)
self.failUnlessEqual(matplotlib.rcParams['backend'],'QtAgg')
def test_qt4(self):
matplotlib.rcParams['backend'] = 'Qt4Agg'
self._test(['-pylab'],Shell.IPShellMatplotlibQt4)
def test_qt4_2(self):
self._test(['-q4thread','-pylab'],Shell.IPShellMatplotlibQt4)
self.failUnlessEqual(matplotlib.rcParams['backend'],'Qt4Agg')
def test_wx(self):
matplotlib.rcParams['backend'] = 'WxAgg'
self._test(['-pylab'],Shell.IPShellMatplotlibWX)
def test_wx_2(self):
self._test(['-pylab','-wthread'],Shell.IPShellMatplotlibWX)
self.failUnlessEqual(matplotlib.rcParams['backend'],'WXAgg')
main()
| mpl-2.0 |