repo_name (string, lengths 6-112) | path (string, lengths 4-204) | copies (string, lengths 1-3) | size (string, lengths 4-6) | content (string, lengths 714-810k) | license (string, 15 classes)
---|---|---|---|---|---
fducau/infoVAE | vae_1.py | 1 | 14008 | import numpy as np
import tensorflow as tf
import input_data
from tensorflow.contrib.distributions import Normal
import copy
np.random.seed(0)
tf.set_random_seed(0)
network_architecture = dict(n_hidden_recog_1=500, # 1st layer encoder neurons
n_hidden_recog_2=500, # 2nd layer encoder neurons
n_hidden_gener_1=500, # 1st layer decoder neurons
n_hidden_gener_2=500, # 2nd layer decoder neurons
n_input=784, # MNIST data input (img shape: 28*28)
n_z=20, # dimensionality of latent space
info=False)
# Load MNIST data in a format suited for tensorflow.
# The script input_data is available under this URL:
# https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/g3doc/tutorials/mnist/input_data.py
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
n_samples = mnist.train.num_examples
def xavier_init(fan_in, fan_out, constant=1):
""" Xavier initialization of network weights"""
# https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
low = - constant * np.sqrt(6.0 / (fan_in + fan_out))
high = constant * np.sqrt(6.0 / (fan_in + fan_out))
return tf.random_uniform((fan_in, fan_out),
minval=low, maxval=high,
dtype=tf.float32)
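# Hedged illustration (added note, not in the original file): the Glorot/Xavier bound used
# above keeps activation variance roughly constant across layers. With
# fan_in = fan_out = 500 the uniform range is +/- sqrt(6/1000) ~= 0.0775, and a uniform
# draw on (-a, a) has variance a**2/3 = 2/(fan_in + fan_out).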
class VariationalAutoencoder(object):
""" Variation Autoencoder (VAE) with an sklearn-like interface implemented using TensorFlow.
This implementation uses probabilistic encoders and decoders using Gaussian
distributions and realized by multi-layer perceptrons. The VAE can be learned
end-to-end.
See "Auto-Encoding Variational Bayes" by Kingma and Welling for more details.
"""
def __init__(self, network_architecture, transfer_fct=tf.nn.softplus,
learning_rate=0.001, batch_size=100):
self.network_architecture = network_architecture
self.transfer_fct = transfer_fct
self.learning_rate = learning_rate
self.batch_size = batch_size
self.step = 0
self.summary_dir = './summary/'
self.info = network_architecture['info']
self.sess = tf.InteractiveSession()
# tf Graph input
self.x = tf.placeholder(tf.float32, [None, network_architecture["n_input"]])
# Create autoencoder network
self._create_network()
# Define loss function based on the variational upper-bound and
# corresponding optimizer
self._create_loss_optimizer()
self.train_summary_writer = tf.train.SummaryWriter(self.summary_dir, self.sess.graph)
self.saver = tf.train.Saver(tf.all_variables())
# Initializing the TensorFlow variables
init = tf.initialize_all_variables()
# Launch the session
self.sess.run(init)
def _create_network(self):
# Initialize autoencoder network weights and biases
network_weights = self._initialize_weights(**self.network_architecture)
# Use recognition network to determine mean and
# (log) variance of Gaussian distribution in latent
# space
self.z_mean, self.z_log_sigma_sq = \
self._recognition_network(network_weights["weights_recog"],
network_weights["biases_recog"],
self.x)
# Draw one sample z from Gaussian distribution
n_z = self.network_architecture["n_z"]
eps = tf.random_normal((self.batch_size, n_z), 0, 1,
dtype=tf.float32)
# z = mu + sigma*epsilon
self.z = tf.add(self.z_mean,
tf.mul(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps),
name='z')
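# (Added note: this is the reparameterization trick. Sampling eps independently of the
# encoder outputs keeps z differentiable with respect to z_mean and z_log_sigma_sq, so
# gradients can flow through the sampling step during training.)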
# Use generator to determine mean of
# Bernoulli distribution of reconstructed input
self.x_reconstr_mean = \
self._generator_network(network_weights["weights_gener"],
network_weights["biases_gener"],
z=self.z)
####
####
####
eps = tf.random_normal((self.batch_size, n_z), 0, 1,
dtype=tf.float32)
self.z_theta = tf.add(0.0, tf.mul(1.0, eps), name='z_theta')
self.x_prime = self._generator_network(network_weights["weights_gener"],
network_weights["biases_gener"],
z=self.z_theta)
self.z_prime_mean, self.z_prime_log_sigma_sq = self._recognition_network(
network_weights["weights_recog"],
network_weights["biases_recog"],
self.x_prime)
dist = Normal(mu=self.z_prime_mean, sigma=tf.sqrt(tf.exp(self.z_prime_log_sigma_sq)))
logli = tf.reduce_sum(dist.log_pdf(self.z_theta, name='x_entropy'), reduction_indices=1)
self.cross_entropy = tf.reduce_mean(- logli)
#self.cross_entropy = tf.reduce_mean(- dist.log_pdf(self.z_theta, name='x_entropy'))
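# (Added note, inferred from the code rather than stated in the original: 28.37 appears to
# be the differential entropy of the 20-dimensional standard normal prior,
# 0.5 * n_z * ln(2*pi*e) ~= 0.5 * 20 * 2.8379 ~= 28.38 nats.)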
self.entropy = tf.constant(28.37)
def _initialize_weights(self, n_hidden_recog_1, n_hidden_recog_2,
n_hidden_gener_1, n_hidden_gener_2,
n_input, n_z, info):
all_weights = dict()
all_weights['weights_recog'] = {
'h1': tf.Variable(xavier_init(n_input, n_hidden_recog_1)),
'h2': tf.Variable(xavier_init(n_hidden_recog_1, n_hidden_recog_2)),
'out_mean': tf.Variable(xavier_init(n_hidden_recog_2, n_z)),
'out_log_sigma': tf.Variable(xavier_init(n_hidden_recog_2, n_z))}
all_weights['biases_recog'] = {
'b1': tf.Variable(tf.zeros([n_hidden_recog_1], dtype=tf.float32)),
'b2': tf.Variable(tf.zeros([n_hidden_recog_2], dtype=tf.float32)),
'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32)),
'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32))}
all_weights['weights_gener'] = {
'h1': tf.Variable(xavier_init(n_z, n_hidden_gener_1)),
'h2': tf.Variable(xavier_init(n_hidden_gener_1, n_hidden_gener_2)),
'out_mean': tf.Variable(xavier_init(n_hidden_gener_2, n_input)),
'out_log_sigma': tf.Variable(xavier_init(n_hidden_gener_2, n_input))}
all_weights['biases_gener'] = {
'b1': tf.Variable(tf.zeros([n_hidden_gener_1], dtype=tf.float32)),
'b2': tf.Variable(tf.zeros([n_hidden_gener_2], dtype=tf.float32)),
'out_mean': tf.Variable(tf.zeros([n_input], dtype=tf.float32)),
'out_log_sigma': tf.Variable(tf.zeros([n_input], dtype=tf.float32))}
return all_weights
def _recognition_network(self, weights, biases, x):
# Generate probabilistic encoder (recognition network), which
# maps inputs onto a normal distribution in latent space.
# The transformation is parametrized and can be learned.
layer_1 = self.transfer_fct(tf.add(tf.matmul(x, weights['h1']),
biases['b1']))
layer_2 = self.transfer_fct(tf.add(tf.matmul(layer_1, weights['h2']),
biases['b2']))
z_mean = tf.add(tf.matmul(layer_2, weights['out_mean']),
biases['out_mean'])
z_log_sigma_sq = \
tf.add(tf.matmul(layer_2, weights['out_log_sigma']),
biases['out_log_sigma'])
return (z_mean, z_log_sigma_sq)
def _generator_network(self, weights, biases, z):
# Generate probabilistic decoder (decoder network), which
# maps points in latent space onto a Bernoulli distribution in data space.
# The transformation is parametrized and can be learned.
layer_1 = self.transfer_fct(tf.add(tf.matmul(z, weights['h1']),
biases['b1']))
layer_2 = self.transfer_fct(tf.add(tf.matmul(layer_1, weights['h2']),
biases['b2']))
x_reconstr_mean = \
tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['out_mean']),
biases['out_mean']))
return x_reconstr_mean
def _create_loss_optimizer(self):
# The loss is composed of two terms:
# 1.) The reconstruction loss (the negative log probability
# of the input under the reconstructed Bernoulli distribution
# induced by the decoder in the data space).
# This can be interpreted as the number of "nats" required
# for reconstructing the input when the activation in latent
# is given.
# Adding 1e-10 to avoid evaluation of log(0.0)
reconstr_loss = -\
tf.reduce_sum(self.x * tf.log(1e-10 + self.x_reconstr_mean) +
(1 - self.x) * tf.log(1e-10 + 1 - self.x_reconstr_mean), 1,
name='reconstruction_loss')
# 2.) The latent loss, which is defined as the Kullback Leibler divergence
# between the distribution in latent space induced by the encoder on
# the data and some prior. This acts as a kind of regularizer.
# This can be interpreted as the number of "nats" required
# for transmitting the latent space distribution given
# the prior.
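# For reference, the closed-form KL between the diagonal Gaussian q(z|x) = N(mu, sigma^2)
# and the standard normal prior is
# KL = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2),
# which is what the expression below computes, with z_log_sigma_sq = log(sigma^2).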
latent_loss = -0.5 * tf.reduce_sum(1 + self.z_log_sigma_sq -
tf.square(self.z_mean) -
tf.exp(self.z_log_sigma_sq), 1,
name='latent_loss')
# 3.) Mutual Information loss
self.lmbda = tf.constant(1.0)
self.MI = tf.add(self.entropy, - self.cross_entropy, name='MI_loss')
if self.info:
self.cost = tf.reduce_mean(reconstr_loss + latent_loss - self.MI)
else:
self.cost = tf.reduce_mean(reconstr_loss - latent_loss)
# Use ADAM optimizer
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.cost)
rec_summary = tf.scalar_summary('reconstruction loss', tf.reduce_mean(reconstr_loss))
latent_summary = tf.scalar_summary('KLD q(z|x) || p(z)', tf.reduce_mean(latent_loss))
cost_summary = tf.scalar_summary('Cost', self.cost)
sigma_summary = tf.scalar_summary('Sigma', tf.reduce_mean(tf.sqrt(tf.exp(self.z_log_sigma_sq))))
mu_summary = tf.scalar_summary('mu', tf.reduce_mean(self.z_mean))
# q_MI_summary = tf.scalar_summary('q_theta(z|x)', tf.reduce_mean(self.q_z_theta_given_x_prime))
x_entropy_summary = tf.scalar_summary('H(z|x)', self.cross_entropy)
MI_summary = tf.scalar_summary('MI', self.MI)
summaries = [rec_summary, latent_summary, cost_summary, x_entropy_summary, MI_summary, sigma_summary, mu_summary]
self.merged = tf.merge_summary(summaries)
def partial_fit(self, X, last=False):
"""Train model based on mini-batch of input data.
Return cost of mini-batch.
"""
opt, cost, cross_entropy, MI, summary = \
self.sess.run((self.optimizer, self.cost,
self.cross_entropy,
self.MI,
self.merged),
feed_dict={self.x: X})
self.train_summary_writer.add_summary(summary, self.step)
if last:
self.saver.save(self.sess, 'model')
self.step += 1
return cost
def transform(self, X):
"""Transform data by mapping it into the latent space."""
# Note: This maps to mean of distribution, we could alternatively
# sample from Gaussian distribution
return self.sess.run(self.z_mean, feed_dict={self.x: X})
def generate(self, z_mu=None):
""" Generate data by sampling from latent space.
If z_mu is not None, data for this point in latent space is
generated. Otherwise, z_mu is drawn from prior in latent
space.
"""
if z_mu is None:
z_mu = np.random.normal(size=self.network_architecture["n_z"])
# Note: This maps to mean of distribution, we could alternatively
# sample from Gaussian distribution
return self.sess.run(self.x_reconstr_mean,
feed_dict={self.z: z_mu})
def reconstruct(self, X):
""" Use VAE to reconstruct given data. """
return self.sess.run(self.x_reconstr_mean,
feed_dict={self.x: X})
def train(network_architecture, learning_rate=0.001,
batch_size=100, training_epochs=10, display_step=5,
info=False):
network_arch = copy.deepcopy(network_architecture)
network_arch['info'] = info
vae = VariationalAutoencoder(network_arch,
learning_rate=learning_rate,
batch_size=batch_size)
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs, _ = mnist.train.next_batch(batch_size)
# Fit training using batch data
if i == total_batch - 1:
cost = vae.partial_fit(batch_xs, last=True)
else:
cost = vae.partial_fit(batch_xs)
# Compute average loss
avg_cost += cost / n_samples * batch_size
# Display logs per epoch step
if epoch % display_step == 0:
print "Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost)
return vae
def main():
vae = train(network_architecture, training_epochs=25)
if __name__ == '__main__':
main() | gpl-3.0 |
sthyme/ZFSchizophrenia | BehaviorAnalysis/HSMovieAnalysis/imageMode.py | 1 | 6047 | #!/usr/bin/python -tt
"""
This script collects pixel differences between video frames
and saves these differences to a file or files with the extension '.npy'
USAGE: python deltaPix.py [videoFile]
If no video file is specified, live video is processed
If a video file is specified, this script runs through the video frame by frame
STUFF THAT CAN BE ADJUSTED:
pixel threshold = how many changed pixels count as a real movement?
experiment length: this is only relevant for live video: see expDuration below
experiment length can also be manually set by hitting 'q' during the video feed
frameRate = this is used to set the timestamps of each video frame
estimated at 30 fps, but user may adjust
This script will report how many frames were analyzed
To set actual fps, run script to calculate # frames,
and divide by running length of the video
Enter this number into 'frameRate' below, and rerun this script
NEED:
1) the two output files automatically created by roiSelect.py: image.png and mask.png
2) the imageTools.py set of tools
NOTE:
Before this script runs, it will ask you if you want to
(a)ppend new data to the .npy file(s) already in the directory
(n)ew experiment: clear out old .npy files before running the analysis
"""
def keepOrAppend(clearData):
# check to see if clear existing data
# or append this data to existing data
if clearData == 'n':
print "Actually not clearing data, but need to make a new file anyway"
#imageTools.deleteData('*.npy')
else:
print "Keeping existing data ..."
# IMPORT NECESSARY MODULES
import matplotlib.image as mpimg
import numpy as np
import cv2
from datetime import datetime, timedelta
import sys
import imageTools
import motionTools
#from collections import dequeue
from scipy.stats import mode
import glob,os
def calc_mode(deq, nump_arr):
for j,k in enumerate(nump_arr[:,0]): #so k are the values, j are the indices. all cols will be 1504. so j will reach 1504
nump_arr[j,:] = mode(np.array([x[j,:] for x in deq]))[0]
return nump_arr
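# (Added note, inferred from main() below: calc_mode collapses a list of equally sized
# grayscale frames into a per-pixel modal image, i.e. an estimate of the static
# background, which is later written out as mode.png.)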
def main(pixThreshold,frameRate,videoStream):
expDuration = 600000 # duration of experiment, in seconds; only relevant for live feed
saveFreq = 4500 # how often to save data, in frames
i,m = imageTools.loadImageAndMask()
# convert mask to integer values for bincount weights
m,w = imageTools.convertMaskToWeights(m)
#print m,w
moviedeq = []
i2=0
for file in glob.glob("*_4*avi"):
if i2 == 15:
break
#for x in range(0,5):
# start camera or open video
videoType, displayDiffs = imageTools.getVideoType(videoStream)
# print "testing: ", file
#print "testing: ", videoStream.split('-')[x]
cap = cv2.VideoCapture(file)
#cap = cv2.VideoCapture(videoStream.split('-')[x])
# adjust video resolution if necessary (sized to mask)
# print 'Camera resolution is %s x %s' % (str(m.shape[1]),str(m.shape[0]))
# cap.set(3,m.shape[1])
# cap.set(4,m.shape[0])
# Set Pixel Threshold
ret,frame = cap.read()
storedFrame = imageTools.grayBlur(frame)
#pixThreshold = int(np.floor( pixThreshold * storedFrame.shape[0] ))
#print('PixelThreshold is %i') % pixThreshold
# Acquire data
if saveFreq / frameRate > expDuration: # do shorter of expDuration vs. saveFreq
saveFreq = expDuration * frameRate
pixData = np.zeros([ saveFreq, len(np.unique(w)) + 1])
#i = 0 # a counter for saving chunks of data
totalFrames = 0
startTime = datetime.now()
oldTime = startTime
elapsed = 0
print('Analyzing motion data...')
#moviedeq = []
while(cap.isOpened()):
ret,frame = cap.read()
if ret == False:
print 'End of Video'
break
currentFrame = imageTools.grayBlur(frame)
moviedeq.append(currentFrame)
# stop experiment if user presses 'q' or if experiment duration is up
if ( cv2.waitKey(1) & 0xFF == ord('q') or
len(sys.argv) == 1 and datetime.now() > startTime + timedelta(seconds = expDuration)
):
break
# record pixel differences in all of the ROIs
#diff = imageTools.diffImage(storedFrame,currentFrame,pixThreshold,0)
timeDiff = 1. / frameRate
elapsed = elapsed + timeDiff
# print elapsed
# calculate and record pixel differences
#counts = np.bincount(w, weights=diff.ravel())
# print counts # output
#pixData[i,:] = np.hstack((elapsed,counts))
totalFrames += 1
storedFrame = currentFrame # comment out if nothing is in first frame
#oldTime = newTime
i2 += 1
# done recording. Remove empty rows (those bigger than i) from PixData
#pixData = pixData[:i,:]
#print pixData
testing = calc_mode(moviedeq, np.zeros([660,1088]))
print "saving mode.png"
cv2.imwrite('mode.png', testing)
# Save info (elapsed time and framerate) for later use
vidInfo = {}
# analysisTime = timeStamp - startTime
# vidInfo['analysisTime'] = float(str(analysisTime.seconds) + '.' + str(analysisTime.microseconds))
# vidInfo['TotalFrames'] = totalFrames
# vidInfo['fps'] = int(totalFrames/vidInfo['analysisTime'])
# #vidInfo['pixThreshold']=pixThreshold
# vidInfo['CameraResolution']='%s x %s' % (str(m.shape[1]),str(m.shape[0]))
#vidInfo['NamePrefix'] = videoStream.split('-')[x]
#vidInfo['NamePrefix'] = videoStream.split('.')[0]
# print ('Analyzed %i frames in %f seconds') % (vidInfo['TotalFrames'],vidInfo['analysisTime'])
# print('FrameRate is about %i fps') % vidInfo['fps']
# print 'Motion threshold is %i pixels' % int(pixThreshold)
# print 'Camera resolution is %s' % vidInfo['CameraResolution']
# release camera
cap.release()
cv2.destroyAllWindows()
return vidInfo
def cmdLine(pixThreshold,frameRate,videoStream):
#keepOrAppend('n')
#keepOrAppend(raw_input("(a)ppend this new data, or (n)ew experiment? >:"))
vidInfo = main(pixThreshold,frameRate,videoStream)
return vidInfo
if __name__ == '__main__':
pixThreshold = imageTools.getPixThreshold(0.032)
frameRate = imageTools.getFrameRate() # default is 30
videoStream = imageTools.getVideoStreamMode(sys.argv)
#videoStream = imageTools.getVideoStream(sys.argv)
vidInfo = cmdLine(pixThreshold,frameRate,videoStream)
| mit |
mikeireland/veloce | veloce/simulator.py | 1 | 11214 | ##Simulator code
import numpy as np
import matplotlib.pyplot as plt
#Define matrices
#Gah
gah1 = 1
gah2 = 1
gah3 = 1
gah4 = 1
gah5 = 1
gah6 = 1
#ghp
ghp1 = 1
ghp2 = 1
ghp3 = 1
ghp4 = 1
ghp5 = 1
ghp6 = 1
#Gpb
gpb1 = 1
gpb2 = 1
gpb3 = 1
gpb4 = 1
gpb5 = 1
gpb6 = 1
gpb7 = 1
#gpa
gpa1 = 1
gpa2 = 1
gpa3 = 1
gpa4 = 1
gpa5 = 1
gpa6 = 1
gpa7 = 1
#Gps
gps1 = 1
gps2 = 1
gps3 = 1
gps4 = 1
gps5 = 1
gps6 = 1
gps7 = 1
#Gsb
gsb1 = 1
gsb2 = 1
gsb3 = 1
gsb4 = 1
gsb5 = 1
gsb6 = 1
gsb7 = 1
#Gih
gih1 = 1
gih2 = 1
gih3 = 1
gih4 = 1
gih5 = 1
gih6 = 1
#gnm
g12 = 1
g13 = 1
g14 = 1
g15 = 1
g16 = 1
g17 = 1
g23 = 1
g24 = 1
g25 = 1
g26 = 1
g27 = 1
g34 = 1
g35 = 1
g36 = 1
g37 = 1
g45 = 1
g46 = 1
g47 = 1
g56 = 1
g57 = 1
g67 = 1
#ch
ch1 = 1
ch2 = 1
ch3 = 1
ch4 = 1
ch5 = 1
ch6 = 1
#cp
cp1 = 1
cp2 = 1
cp3 = 1
cp4 = 1
cp5 = 1
cp6 = 1
cp7 = 1
#cb
cb = 1
#dt_damp
dt_damp = 1
A_sim= np.array([ [-1/dt_damp, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, (-gpb1 - gpb2 - gpb3 - gpb4 - gpb5 - gpb6 - gpb7 - gsb2 + gsb2**2/(gps2 + gsb2) - gsb4 + gsb4**2/(gps4 + gsb4) - gsb7 + gsb7**2/(gps7 + gsb7))/cb, gpb1/cb, (gpb2 + (gps2*gsb2)/(gps2 + gsb2))/cb, gpb3/cb, (gpb4 + (gps4*gsb4)/(gps4 + gsb4))/cb, gpb5/cb, gpb6/cb, (gpb7 + (gps7*gsb7)/(gps7 + gsb7))/cb,0,0,0,0,0,0],
[(((gah1*ghp1)/(gah1 + ghp1 + gih1) + gpa1))/cp1, (gpb1)/cp1, (-g12 - g14 - g15 - g16 - g17 - ghp1 + ghp1**2/(gah1 + ghp1 + gih1) - gpa1 - gpb1)/cp1, g12/cp1, 0, g14/cp1, g15/cp1, g16/cp1, g17/cp1, (ghp1*gih1)/(cp1*(gah1 + ghp1 + gih1)), 0, 0, 0, 0, 0],
[((gah2*ghp2)/(gah2 + ghp2 + gih2) + gpa2)/cp2, (gpb2 + (gps2*gsb2)/(gps2 + gsb2))/cp2, g12/cp2, (-g12 - g23 - g25 - g26 - ghp2 + ghp2**2/(gah2 + ghp2 + gih2) - gpa2 - gpb2 - gps2 + gps2**2/(gps2 + gsb2))/cp2, g23/cp2, 0, g25/cp2, g26/cp2, 0, 0, (ghp2*gih2)/(cp2*(gah2 + ghp2 + gih2)), 0, 0, 0, 0],
[((gah3*ghp3)/(gah3 + ghp3 + gih3) + gpa3)/cp3, gpb3/cp3, 0, g23/cp3, (-g23 - g34 - g35 - g36 - g37 - ghp3 + ghp3**2/(gah3 + ghp3 + gih3) - gpa3 - gpb3)/cp3, g34/cp3, g35/cp3, g36/cp3, g37/cp3, 0, 0, (ghp3*gih3)/(cp3*(gah3 + ghp3 + gih3)), 0, 0, 0],
[((gah4*ghp4)/(gah4 + ghp4 + gih4) + gpa4)/cp4, (gpb4 + (gps4*gsb4)/(gps4 + gsb4))/cp4, g14/cp4, 0, g34/cp4, (-g14 - g34 - g45 - g46 - g47 - ghp4 + ghp4**2/(gah4 + ghp4 + gih4) - gpa4 - gpb4 - gps4 + gps4**2/(gps4 + gsb4))/cp4, g45/cp4, g46/cp4, g47/cp4, 0, 0, 0, (ghp4*gih4)/(cp4*(gah4 + ghp4 + gih4)),0,0],
[((gah5*ghp5)/(gah5 + ghp5 + gih5) + gpa5)/cp5, gpb5/cp5, g15/cp5, g25/cp5, g35/cp5, g45/cp5, (-g15 - g25 - g35 - g45 - g57 - ghp5 + ghp5**2/(gah5 + ghp5 + gih5) - gpa5 - gpb5)/cp5, 0, g57/cp5, 0, 0, 0, 0, (ghp5*gih5)/(cp5*(gah5 + ghp5 + gih5)), 0],
[((gah6*ghp6)/(gah6 + ghp6 + gih6) + gpa6)/cp6, gpb6/cp6, g16/cp6, g26/cp6, g36/cp6, g46/cp6, 0, (-g16 - g26 - g36 - g46 - g67 - ghp6 + ghp6**2/(gah6 + ghp6 + gih6) - gpa6 - gpb6)/cp6, g67/cp6, 0, 0, 0, 0, 0, (ghp6*gih6)/(cp6*(gah6 + ghp6 + gih6))],
[0, (gpb7 + (gps7*gsb7)/(gps7 + gsb7))/cp7, g17/cp7, 0, g37/cp7, g47/cp7, g57/cp7, g67/cp7, (-g17 - g37 - g47 - g57 - g67 - gpb7 - gps7 + gps7**2/(gps7 + gsb7))/cp7, 0, 0, 0, 0, 0, 0],
[(gah1*gih1)/(ch1*(gah1 + ghp1 + gih1)), 0, (ghp1*gih1)/(ch1*(gah1 + ghp1 + gih1)), 0, 0, 0, 0, 0, 0, (-gih1 + gih1**2/(gah1 + ghp1 + gih1))/ch1, 0, 0, 0, 0, 0],
[(gah2*gih2)/(ch2*(gah2 + ghp2 + gih2)),0, 0, (ghp2*gih2)/(ch2*(gah2 + ghp2 + gih2)), 0, 0, 0, 0, 0, 0, (-gih2 + gih2**2/(gah2 + ghp2 + gih2))/ch2, 0, 0, 0, 0],
[(gah3*gih3)/(ch3*(gah3 + ghp3 + gih3)), 0, 0, 0, (ghp3*gih3)/(ch3*(gah3 + ghp3 + gih3)), 0, 0, 0, 0, 0, 0, (-gih3 + gih3**2/(gah3 + ghp3 + gih3))/ch3, 0, 0, 0],
[(gah4*gih4)/(ch4*(gah4 + ghp4 + gih4)), 0, 0, 0, 0, (ghp4*gih4)/(ch4*(gah4 + ghp4 + gih4)), 0, 0, 0, 0, 0, 0, (-gih4 + gih4**2/(gah4 + ghp4 + gih4))/ch4, 0, 0],
[(gah5*gih5)/(ch5*(gah5 + ghp5 + gih5)), 0, 0, 0, 0, 0, (ghp5*gih5)/(ch5*(gah5 + ghp5 + gih5)), 0, 0, 0, 0, 0, 0, (-gih5 + gih5**2/(gah5 + ghp5 + gih5))/ch5, 0],
[(gah6*gih6)/(ch6*(gah6 + ghp6 + gih6)), 0, 0, 0, 0, 0, 0, (ghp6*gih6)/(ch6*(gah6 + ghp6 + gih6)), 0, 0, 0, 0, 0, 0, (-gih6 + gih6**2/(gah6 + ghp6 + gih6))/ch6] ])
B_sim = np.array([ [0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[1/ch1,0,0,0,0,0],
[0, 1/ch2,0,0,0,0],
[0,0, 1/ch3,0,0,0],
[0,0,0, 1/ch4,0,0],
[0,0,0,0, 1/ch5,0],
[0,0,0,0,0, 1/ch6] ])
C_sim = np.array([ [0, gsb2/(gps2 + gsb2), 0, gps2/(gps2 + gsb2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, gsb4/(gps4 + gsb4), 0, 0, 0, gps4/(gps4 + gsb4), 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, gsb7/(gps7 + gsb7), 0, 0, 0, 0, 0, 0, gps7/(gps7 + gsb7), 0, 0, 0, 0, 0, 0] ])
#simulation variables
t_end = 1000
dt = 0.001
timesteps = round(t_end/dt)
xvalues = np.zeros( (15, timesteps) )
times = np.arange(0, timesteps*dt, dt)
#set the initial conditions
xvalues[:,0:1] = np.array([ [15],[25],[25],[25],[25],[25],[25],[25],[25],[25],[25],[25],[25],[25],[25] ])
yvalues = np.zeros( (3, timesteps) )
yvalues[:,0:1] = np.dot(C_sim, xvalues[:,0:1]) #calculate output at t=0
u = np.zeros( (6,1) )
ambient = 15 + 2*np.sin((2*np.pi*times)/120)
#Define control loop matrices
#Gah
gah1 = 1
gah2 = 1
gah3 = 1
gah4 = 1
gah5 = 1
gah6 = 1
#ghp
ghp1 = 1
ghp2 = 1
ghp3 = 1
ghp4 = 1
ghp5 = 1
ghp6 = 1
#Gpb
gpb1 = 1
gpb2 = 1
gpb3 = 1
gpb4 = 1
gpb5 = 1
gpb6 = 1
gpb7 = 1
#gpa
gpa1 = 1
gpa2 = 1
gpa3 = 1
gpa4 = 1
gpa5 = 1
gpa6 = 1
gpa7 = 1
#Gps
gps1 = 1
gps2 = 1
gps3 = 1
gps4 = 1
gps5 = 1
gps6 = 1
gps7 = 1
#Gsb
gsb1 = 1
gsb2 = 1
gsb3 = 1
gsb4 = 1
gsb5 = 1
gsb6 = 1
gsb7 = 1
#Gih
gih1 = 1
gih2 = 1
gih3 = 1
gih4 = 1
gih5 = 1
gih6 = 1
#gnm
g12 = 1
g13 = 1
g14 = 1
g15 = 1
g16 = 1
g17 = 1
g23 = 1
g24 = 1
g25 = 1
g26 = 1
g27 = 1
g34 = 1
g35 = 1
g36 = 1
g37 = 1
g45 = 1
g46 = 1
g47 = 1
g56 = 1
g57 = 1
g67 = 1
#ch
ch1 = 1
ch2 = 1
ch3 = 1
ch4 = 1
ch5 = 1
ch6 = 1
#cp
cp1 = 1
cp2 = 1
cp3 = 1
cp4 = 1
cp5 = 1
cp6 = 1
cp7 = 1
#cb
cb = 1
#dt_damp
dt_damp = 1000
A_con= np.array([ [-1/dt_damp, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, (-gpb1 - gpb2 - gpb3 - gpb4 - gpb5 - gpb6 - gpb7 - gsb2 + gsb2**2/(gps2 + gsb2) - gsb4 + gsb4**2/(gps4 + gsb4) - gsb7 + gsb7**2/(gps7 + gsb7))/cb, gpb1/cb, (gpb2 + (gps2*gsb2)/(gps2 + gsb2))/cb, gpb3/cb, (gpb4 + (gps4*gsb4)/(gps4 + gsb4))/cb, gpb5/cb, gpb6/cb, (gpb7 + (gps7*gsb7)/(gps7 + gsb7))/cb,0,0,0,0,0,0],
[(((gah1*ghp1)/(gah1 + ghp1 + gih1) + gpa1))/cp1, (gpb1)/cp1, (-g12 - g14 - g15 - g16 - g17 - ghp1 + ghp1**2/(gah1 + ghp1 + gih1) - gpa1 - gpb1)/cp1, g12/cp1, 0, g14/cp1, g15/cp1, g16/cp1, g17/cp1, (ghp1*gih1)/(cp1*(gah1 + ghp1 + gih1)), 0, 0, 0, 0, 0],
[((gah2*ghp2)/(gah2 + ghp2 + gih2) + gpa2)/cp2, (gpb2 + (gps2*gsb2)/(gps2 + gsb2))/cp2, g12/cp2, (-g12 - g23 - g25 - g26 - ghp2 + ghp2**2/(gah2 + ghp2 + gih2) - gpa2 - gpb2 - gps2 + gps2**2/(gps2 + gsb2))/cp2, g23/cp2, 0, g25/cp2, g26/cp2, 0, 0, (ghp2*gih2)/(cp2*(gah2 + ghp2 + gih2)), 0, 0, 0, 0],
[((gah3*ghp3)/(gah3 + ghp3 + gih3) + gpa3)/cp3, gpb3/cp3, 0, g23/cp3, (-g23 - g34 - g35 - g36 - g37 - ghp3 + ghp3**2/(gah3 + ghp3 + gih3) - gpa3 - gpb3)/cp3, g34/cp3, g35/cp3, g36/cp3, g37/cp3, 0, 0, (ghp3*gih3)/(cp3*(gah3 + ghp3 + gih3)), 0, 0, 0],
[((gah4*ghp4)/(gah4 + ghp4 + gih4) + gpa4)/cp4, (gpb4 + (gps4*gsb4)/(gps4 + gsb4))/cp4, g14/cp4, 0, g34/cp4, (-g14 - g34 - g45 - g46 - g47 - ghp4 + ghp4**2/(gah4 + ghp4 + gih4) - gpa4 - gpb4 - gps4 + gps4**2/(gps4 + gsb4))/cp4, g45/cp4, g46/cp4, g47/cp4, 0, 0, 0, (ghp4*gih4)/(cp4*(gah4 + ghp4 + gih4)),0,0],
[((gah5*ghp5)/(gah5 + ghp5 + gih5) + gpa5)/cp5, gpb5/cp5, g15/cp5, g25/cp5, g35/cp5, g45/cp5, (-g15 - g25 - g35 - g45 - g57 - ghp5 + ghp5**2/(gah5 + ghp5 + gih5) - gpa5 - gpb5)/cp5, 0, g57/cp5, 0, 0, 0, 0, (ghp5*gih5)/(cp5*(gah5 + ghp5 + gih5)), 0],
[((gah6*ghp6)/(gah6 + ghp6 + gih6) + gpa6)/cp6, gpb6/cp6, g16/cp6, g26/cp6, g36/cp6, g46/cp6, 0, (-g16 - g26 - g36 - g46 - g67 - ghp6 + ghp6**2/(gah6 + ghp6 + gih6) - gpa6 - gpb6)/cp6, g67/cp6, 0, 0, 0, 0, 0, (ghp6*gih6)/(cp6*(gah6 + ghp6 + gih6))],
[0, (gpb7 + (gps7*gsb7)/(gps7 + gsb7))/cp7, g17/cp7, 0, g37/cp7, g47/cp7, g57/cp7, g67/cp7, (-g17 - g37 - g47 - g57 - g67 - gpb7 - gps7 + gps7**2/(gps7 + gsb7))/cp7, 0, 0, 0, 0, 0, 0],
[(gah1*gih1)/(ch1*(gah1 + ghp1 + gih1)), 0, (ghp1*gih1)/(ch1*(gah1 + ghp1 + gih1)), 0, 0, 0, 0, 0, 0, (-gih1 + gih1**2/(gah1 + ghp1 + gih1))/ch1, 0, 0, 0, 0, 0],
[(gah2*gih2)/(ch2*(gah2 + ghp2 + gih2)), 0, 0, (ghp2*gih2)/(ch2*(gah2 + ghp2 + gih2)), 0, 0, 0, 0, 0, 0, (-gih2 + gih2**2/(gah2 + ghp2 + gih2))/ch2, 0, 0, 0, 0],
[(gah3*gih3)/(ch3*(gah3 + ghp3 + gih3)), 0, 0, 0, (ghp3*gih3)/(ch3*(gah3 + ghp3 + gih3)), 0, 0, 0, 0, 0, 0, (-gih3 + gih3**2/(gah3 + ghp3 + gih3))/ch3, 0, 0, 0],
[(gah4*gih4)/(ch4*(gah4 + ghp4 + gih4)), 0, 0, 0, 0, (ghp4*gih4)/(ch4*(gah4 + ghp4 + gih4)), 0, 0, 0, 0, 0, 0, (-gih4 + gih4**2/(gah4 + ghp4 + gih4))/ch4, 0, 0],
[(gah5*gih5)/(ch5*(gah5 + ghp5 + gih5)), 0, 0, 0, 0, 0, (ghp5*gih5)/(ch5*(gah5 + ghp5 + gih5)), 0, 0, 0, 0, 0, 0, (-gih5 + gih5**2/(gah5 + ghp5 + gih5))/ch5, 0],
[(gah6*gih6)/(ch6*(gah6 + ghp6 + gih6)), 0, 0, 0, 0, 0, 0, (ghp6*gih6)/(ch6*(gah6 + ghp6 + gih6)), 0, 0, 0, 0, 0, 0, (-gih6 + gih6**2/(gah6 + ghp6 + gih6))/ch6] ])
B_con = np.array([ [0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[1/ch1,0,0,0,0,0],
[0, 1/ch2,0,0,0,0],
[0,0, 1/ch3,0,0,0],
[0,0,0, 1/ch4,0,0],
[0,0,0,0, 1/ch5,0],
[0,0,0,0,0, 1/ch6] ])
C_con = np.array([ [0, gsb2/(gps2 + gsb2), 0, gps2/(gps2 + gsb2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, gsb4/(gps4 + gsb4), 0, 0, 0, gps4/(gps4 + gsb4), 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, gsb7/(gps7 + gsb7), 0, 0, 0, 0, 0, 0, gps7/(gps7 + gsb7), 0, 0, 0, 0, 0, 0] ])
#lqg math
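# (Added note: main() below integrates the linear state-space model xdot = A_sim*x + B_sim*u
# with a forward-Euler step, x[k+1] = x[k] + xdot*dt, and reads the outputs through C_sim.
# The LQG controller itself is not implemented in this file; A_con, B_con and C_con are
# defined above but unused here.)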
def main():
for step in range(1, timesteps):
xdot = np.dot(A_sim, xvalues[:, (step -1):step]) + np.dot(B_sim,u)
xvalues[:,step:(step + 1)] = xvalues[:, (step - 1):step] + xdot*dt
yvalues[:,step:(step + 1)] = np.dot(C_sim, xvalues[:, (step -1):step])
#set ambient fluctuation
xvalues[0,step] = ambient[step]
plt.plot(times, yvalues[1,:])
plt.show() | gpl-3.0 |
mrzl/ECO | src/python/nlp/original_2d_export.py | 2 | 3090 | import argparse
import pprint
import glob
import sys
import gensim
import util
import numpy
import json
import os
from sklearn.manifold import TSNE
def process_arguments(args):
parser = argparse.ArgumentParser(description='configure Word2Vec model building')
parser.add_argument('--model_path', action='store', help='the path to the model')
parser.add_argument('--txt_path', action='store', help='path containing text files which are all loaded')
parser.add_argument('--output_file', action='store', help='the text file to store all vectors in')
params = vars(parser.parse_args(args))
return params
class LineVectorCombination(object):
vector = 0
sentence = 0
if __name__ == '__main__':
params = process_arguments(sys.argv[1:])
input_path = params['model_path']
util.enable_verbose_training(sys.argv[0])
try:
model = gensim.models.Word2Vec.load_word2vec_format(input_path, binary=True)
# this raises an exception if the model type is different..
except Exception:
# just use the other method of loading..
model = gensim.models.Word2Vec.load(input_path)
txt_path = params['txt_path']
data_300d = []
originals = []
original_vectors = []
original_sentences = []
text_files = glob.glob(txt_path + '/*.txt')
for file in text_files:
line = 'loading file ' + str(text_files.index(file)) + '/' + str(len(text_files))
print(line)
index = 0
for line in open(file, 'r'):
vector_words = []
word_count = 0
for word in line.split():
try:
vector_words.append(model[word])
word_count += 1
except:
pass
# skip words that are not in the model vocabulary
if word_count > 5:
vector = gensim.matutils.unitvec(numpy.array(vector_words).mean(axis=0))
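# (The line above forms a sentence embedding as the L2-normalised mean of the
# in-vocabulary word vectors, via gensim.matutils.unitvec.)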
combined = LineVectorCombination()
combined.sentence = line
combined.vector = vector
originals.append(combined)
original_vectors.append(vector)
original_sentences.append(line)
vlist = vector.tolist()
intlist = []
for number in vlist:
intnumber = int(number*10000)
intlist.append(intnumber)
data_300d.append({"sentence": line, "point": intlist})
index += 1
output_file = params['output_file']
# X = numpy.array(original_vectors)
# tsne = TSNE(n_components=2, learning_rate=200, perplexity=20, verbose=2).fit_transform(X)
#
# data_2d = []
# for i, f in enumerate(original_sentences):
# point = [(tsne[i, k] - numpy.min(tsne[:, k]))/(numpy.max(tsne[:, k]) - numpy.min(tsne[:, k])) for k in range(2)]
# data_2d.append({"sentence": os.path.abspath(original_sentences[i]), "point": point})
with open(output_file, 'w') as outfile:
#json.dump(data_2d, outfile)
json.dump(data_300d, outfile)
| apache-2.0 |
ctn-waterloo/nengo_theano | nengo_theano/templates/basalganglia.py | 1 | 4482 | def make(net, name='Basal Ganglia', dimensions=1, neurons=100,
tau_ampa=0.002, tau_gaba=0.008, output_weight=1,
radius=1.5):
"""This function creates a subnetwork with a model of the basal ganglia
based on the paper (Gurney, Prescott, & Redgrave, 2001)
NOTE: To match the basal ganglia template from Java Nengo, set pstc=.01
on connection to input ensemble.
:param Network net:
:param string name:
:param int dimensions:
:param int neurons:
:param float tau_ampa:
:param float tau_gaba:
:param float output_weight:
:param float radius:
:returns SubNetwork:
"""
netbg = net.make_subnetwork(name)
netbg.make('input', neurons=1, dimensions=dimensions, mode='direct')
netbg.make('output', neurons=1, dimensions=dimensions, mode='direct')
# connection weights from (Gurney, Prescott, & Redgrave, 2001)
mm=1; mp=1; me=1; mg=1
ws=1; wt=1; wm=1; wg=1; wp=0.9; we=0.3
e=0.2; ep=-0.25; ee=-0.2; eg=-0.2
le=0.2; lg=0.2
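# (Added note, inferred from how these constants are used below: m* are output slopes,
# w* are connection weights, e/ep/ee/eg act as firing thresholds and ensemble intercepts,
# and le/lg scale the striatal input weights.)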
# create the necessary neural ensembles
#TODO: implement decoder_sign and set=1 for this population
netbg.make('StrD1', neurons=neurons, array_size=dimensions,
dimensions=1, intercept=(e,1), encoders=[[1]], radius=radius)
#TODO: implement decoder_sign and set=1 for this population
netbg.make('StrD2', neurons=neurons, array_size=dimensions,
dimensions=1, intercept=(e,1), encoders=[[1]], radius=radius)
#TODO: implement decoder_sign and set=1 for this population
netbg.make('STN', neurons=neurons, array_size=dimensions,
dimensions=1, intercept=(ep,1), encoders=[[1]], radius=radius)
#TODO: implement decoder_sign and set=1 for this population
netbg.make('GPi', neurons=neurons, array_size=dimensions,
dimensions=1, intercept=(eg,1), encoders=[[1]], radius=radius)
#TODO: implement decoder_sign and set=1 for this population
netbg.make('GPe', neurons=neurons, array_size=dimensions,
dimensions=1, intercept=(ee,1), encoders=[[1]], radius=radius)
# connect the input to the striatum and STN (excitatory)
netbg.connect('input', 'StrD1', weight=ws*(1+lg), pstc=tau_ampa)
netbg.connect('input', 'StrD2', weight=ws*(1-le), pstc=tau_ampa)
netbg.connect('input', 'STN', weight=wt, pstc=tau_ampa)
# connect the striatum to the GPi and GPe (inhibitory)
def func_str(x):
if x[0] < e: return 0
return mm * (x[0] - e)
netbg.connect('StrD1', 'GPi', func=func_str, weight=-wm, pstc=tau_gaba)
netbg.connect('StrD2', 'GPe', func=func_str, weight=-wm, pstc=tau_gaba)
# connect the STN to GPi and GPe (broad and excitatory)
def func_stn(x):
if x[0] < ep: return 0
return mp * (x[0] - ep)
tr = [[wp] * dimensions for i in range(dimensions)]
netbg.connect('STN', 'GPi', func=func_stn, transform=tr, pstc=tau_ampa)
netbg.connect('STN', 'GPe', func=func_stn, transform=tr, pstc=tau_ampa)
# connect the GPe to GPi and STN (inhibitory)
def func_gpe(x):
if x[0] < ee: return 0
return me * (x[0] - ee)
netbg.connect('GPe', 'GPi', func=func_gpe, weight=-we, pstc=tau_gaba)
netbg.connect('GPe', 'STN', func=func_gpe, weight=-wg, pstc=tau_gaba)
#connect GPi to output (inhibitory)
def func_gpi(x):
if x[0]<eg: return 0
return mg*(x[0]-eg)
netbg.connect('GPi', 'output', func=func_gpi, pstc=tau_gaba,
weight=output_weight)
def test_basalganglia():
import numpy as np
import matplotlib.pyplot as plt
import math
import nengo_theano as nef
from .. import templates
net = nef.Network('BG Test')
def func(x):
return [math.sin(x), .5,.2]
net.make_input('in', value=func)
templates.basalganglia.make(net=net, name='BG',
neurons=300, dimensions=3)
net.connect('in', 'BG.input', pstc=.01)
timesteps = 1000
dt_step = 0.01
t = np.linspace(dt_step, timesteps*dt_step, timesteps)
pstc = 0.01
Ip = net.make_probe('in', dt_sample=dt_step, pstc=pstc)
BGp = net.make_probe('BG.output', dt_sample=dt_step, pstc=pstc)
print "starting simulation"
net.run(timesteps*dt_step)
# plot the results
plt.ioff(); plt.close();
plt.subplot(2,1,1)
plt.plot(t, Ip.get_data(), 'x'); plt.title('Input')
plt.subplot(2,1,2)
plt.plot(BGp.get_data()); plt.title('BG.output')
plt.tight_layout()
plt.show()
| mit |
PytLab/catplot | catplot/grid_components/grid_canvas.py | 1 | 14808 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Module for grid plotting canvas.
"""
from collections import namedtuple
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.spines import Spine
from catplot.canvas import Canvas
from catplot.grid_components import extract_plane
from catplot.grid_components.nodes import Node2D, Node3D
from catplot.grid_components.edges import Edge2D, Arrow2D, Edge3D
from catplot.grid_components.supercell import SuperCell2D, SuperCell3D
from catplot.grid_components.planes import Plane3D
class Grid2DCanvas(Canvas):
""" Canvas for 2D grid plotting.
"""
def __init__(self, **kwargs):
super(Grid2DCanvas, self).__init__(**kwargs)
self._set_axes()
# Equalize the scale of x and y axis.
self.axes.set_aspect("equal")
# Attributes for 2D grid canvas.
self.nodes = []
self.edges = []
self.arrows = []
self.supercells = []
def add_supercell(self, supercell):
""" Add a supercell to 2D grid canvas.
"""
if not isinstance(supercell, SuperCell2D):
raise ValueError("supercell must be a SuperCell2D object")
self.supercells.append(supercell)
self.nodes.extend(supercell.nodes)
self.edges.extend(supercell.edges)
self.arrows.extend(supercell.arrows)
def add_supercells(self, supercells):
""" Add multiple supercells to 2D grid canvas.
"""
for sc in supercells:
self.add_supercell(sc)
def add_node(self, node):
""" Add a node to grid canvas.
"""
# Check node.
if not isinstance(node, Node2D):
raise ValueError("node must be a Node2D object")
self.nodes.append(node)
def add_nodes(self, nodes):
""" Add multiple nodes to canvas.
"""
for node in nodes:
self.add_node(node)
def add_edge(self, edge):
""" Add a edge to grid canvas.
"""
if not isinstance(edge, Edge2D):
raise ValueError("edge must be an Edge2D object")
if isinstance(edge, Arrow2D):
self.arrows.append(edge)
else:
self.edges.append(edge)
def add_edges(self, edges):
""" Add multiple edges to canvas.
"""
for edge in edges:
self.add_edge(edge)
@property
def node_coordinates(self):
""" Coordinates for all nodes.
"""
return np.array([node.coordinate.tolist() for node in self.nodes])
@property
def node_edgecolors(self):
""" Color codes for node edges.
"""
return [node.edgecolor for node in self.nodes]
@property
def node_colors(self):
""" Colors for all nodes.
"""
return [node.color for node in self.nodes]
@property
def edge_coordinates(self):
""" Coordiantes for all edges.
"""
if not self.edges:
return []
else:
x = np.concatenate([edge.x for edge in self.edges])
y = np.concatenate([edge.y for edge in self.edges])
return np.array(list(zip(x, y)))
@property
def arrow_colors(self):
""" Colors for all arrows.
"""
return [arrow.color for arrow in self.arrows]
@property
def arrow_coordinates(self):
""" Coordinates for all arrows.
"""
if not self.arrows:
return []
else:
x = np.concatenate([arrow.x for arrow in self.arrows])
y = np.concatenate([arrow.y for arrow in self.arrows])
return np.array(list(zip(x, y)))
def _get_data_limits(self):
""" Private helper function to get the limits of data.
"""
node_x = self.node_coordinates[:, 0] if self.nodes else []
edge_x = self.edge_coordinates[:, 0] if self.edges else []
arrow_x = self.arrow_coordinates[:, 0] if self.arrows else []
x = np.concatenate([node_x, edge_x, arrow_x])
max_x, min_x = np.max(x), np.min(x)
node_y = self.node_coordinates[:, 1] if self.nodes else []
edge_y = self.edge_coordinates[:, 1] if self.edges else []
arrow_y = self.arrow_coordinates[:, 1] if self.arrows else []
y = np.concatenate([node_y, edge_y, arrow_y])
max_y, min_y = np.max(y), np.min(y)
return self._limits(max_x, min_x, max_y, min_y)
def draw(self):
""" Draw all nodes, edges and arrows on canvas.
"""
if not any([self.nodes, self.edges, self.arrows]):
self._logger.warning("Attempted to draw in an empty canvas")
return
# Add edges to canvas.
for edge in self.edges:
self.axes.add_line(edge.line2d())
for arrow in self.arrows:
self.axes.arrow(*arrow.start, dx=arrow.dx, dy=arrow.dy,
length_includes_head=True,
head_width=arrow.head_width,
head_length=arrow.head_length,
shape=arrow.shape,
alpha=arrow.alpha,
linewidth=arrow.width,
color=arrow.color,
linestyle=arrow.style,
zorder=arrow.zorder)
# Add nodes to canvas one by one.
for node in self.nodes:
self.axes.scatter(*node.coordinate,
color=node.color,
edgecolors=node.edgecolor,
marker=node.style,
alpha=node.alpha,
s=node.size,
linewidth=node.line_width,
linestyle=node.line_style,
zorder=node.zorder)
# Set axes limits.
limits = self._get_data_limits()
self.axes.set_xlim(limits.min_x, limits.max_x)
self.axes.set_ylim(limits.min_y, limits.max_y)
def redraw(self):
""" Clear the canvas and draw all components again.
"""
self.clear()
self.draw()
def clear(self):
""" Clear components drawned in canvas.
"""
self.axes.clear()
def deep_clear(self):
""" Clear all components in canvas.
"""
self.clear()
self.nodes = []
self.edges = []
self.arrows = []
self.supercells = []
@extract_plane
def to3d(self, canvas3d, **kwargs):
""" Convert the 2D canvas to a 3D canvas.
Parameters:
-----------
plane: str, which plane components in 2D canvas will be mapped to.
The value could be "xy", "xz" or "yz".
Others in kwargs is the same with `Grid2DCanvas()`.
"""
plane = kwargs.pop("plane")
# Map all components.
nodes = [n.to3d(plane=plane) for n in self.nodes]
edges = [e.to3d(plane=plane) for e in self.edges]
supercells = [s.to3d(plane=plane) for s in self.supercells]
if not isinstance(canvas3d, Grid3DCanvas):
raise ValueError("canvas3d must be a Grid3DCanvas object")
canvas3d.add_nodes(nodes)
canvas3d.add_edges(edges)
# NOTE: don't use add_supercells here !!!
canvas3d.supercells.extend(supercells)
return canvas3d
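# A hedged usage sketch (assumption: Node2D accepts a 2-D coordinate as its first
# constructor argument, which is not shown in this module):
#
#     from catplot.grid_components.nodes import Node2D
#     canvas = Grid2DCanvas()
#     canvas.add_node(Node2D([0.5, 0.5]))
#     canvas.draw()
#     canvas.figure.savefig("grid.png")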
class Grid3DCanvas(Grid2DCanvas):
""" Canvas for 3D grid plotting.
"""
def __init__(self, **kwargs):
# NOTE: here we call the method in Canvas NOT Grid2DCanvas.
super(Grid2DCanvas, self).__init__(**kwargs)
self.z_ticks = kwargs.pop("z_ticks", None)
# Figure has been created in base class constructor.
# Add an axes to figure.
self.axes = self.figure.add_subplot(111, projection="3d",
facecolor=self.facecolor)
# Change the spine color of axes.
if self.edgecolor:
for child in self.axes.get_children():
if isinstance(child, Spine):
child.set_color(self.edgecolor)
# Set axes ticks.
if self.x_ticks is not None:
self.axes.set_xticks(self.x_ticks)
if self.y_ticks is not None:
self.axes.set_yticks(self.y_ticks)
if self.z_ticks is not None:
self.axes.set_zticks(self.z_ticks)
self.axes.set_aspect("equal")
# Attributes for 3D canvas.
self.nodes = []
self.edges = []
self.supercells = []
self.arrows = [] # Just a placeholder here.
self.planes = []
def _limits(self, max_x, min_x, max_y, min_y, max_z, min_z):
""" Override parent's function to get 3D data limits.
Parameters:
-----------
max_x: float, the maximum of x values.
min_x: float, the minimum of x values.
max_y: float, the maximum of y values.
min_y: float, the minimum of y values.
max_z: float, the maximum of z values.
min_z: float, the minimum of z values.
"""
scale_x = max_x - min_x
scale_y = max_y - min_y
scale_z = max_z - min_z
# Define a namedtuple to be returned.
Limits = namedtuple("Limits", ["max_x", "min_x", "max_y", "min_y", "max_z", "min_z"])
limits = [max_x + self.margin_ratio*scale_x,
min_x - self.margin_ratio*scale_x,
max_y + self.margin_ratio*scale_y,
min_y - self.margin_ratio*scale_y,
max_z + self.margin_ratio*scale_z,
min_z - self.margin_ratio*scale_z]
return Limits._make(limits)
def _get_data_limits(self):
""" Get limits for all data in canvas.
"""
node_x = self.node_coordinates[:, 0] if self.nodes else []
edge_x = self.edge_coordinates[:, 0] if self.edges else []
plane_x = (np.concatenate([np.concatenate(plane.x) for plane in self.planes])
if self.planes else [])
x = np.concatenate([node_x, edge_x, plane_x])
max_x, min_x = np.max(x), np.min(x)
node_y = self.node_coordinates[:, 1] if self.nodes else []
edge_y = self.edge_coordinates[:, 1] if self.edges else []
plane_y = (np.concatenate([np.concatenate(plane.y) for plane in self.planes])
if self.planes else [])
y = np.concatenate([node_y, edge_y, plane_y])
max_y, min_y = np.max(y), np.min(y)
node_z = self.node_coordinates[:, 2] if self.nodes else []
edge_z = self.edge_coordinates[:, 2] if self.edges else []
plane_z = (np.concatenate([np.concatenate(plane.z) for plane in self.planes])
if self.planes else [])
z = np.concatenate([node_z, edge_z, plane_z])
max_z, min_z = np.max(z), np.min(z)
return self._limits(max_x, min_x, max_y, min_y, max_z, min_z)
def add_node(self, node):
""" Add a 3D node to 3D grid canvas.
"""
if not isinstance(node, Node3D):
raise ValueError("node must be a Node3D object")
self.nodes.append(node)
def add_edge(self, edge):
""" Add a 3D edge to canvas.
"""
if not isinstance(edge, Edge3D):
raise ValueError("edge must be an Edge3D object")
self.edges.append(edge)
def add_supercell(self, supercell):
""" Add a supercell to 3D grid canvas.
"""
if not isinstance(supercell, SuperCell3D):
raise ValueError("supercell must be a SuperCell3D object")
self.supercells.append(supercell)
self.nodes.extend(supercell.nodes)
self.edges.extend(supercell.edges)
def add_plane(self, plane):
""" Add a 3D plane to canvas.
"""
if not isinstance(plane, Plane3D):
raise ValueError("plane must be an Plane3D object")
self.planes.append(plane)
def add_planes(self, planes):
""" Add multiple planes to canvas.
"""
for plane in planes:
self.add_plane(plane)
@property
def edge_coordinates(self):
""" Coordinates for all edges in 3D grid canvas.
"""
if not self.edges:
return []
else:
x = np.concatenate([edge.x for edge in self.edges])
y = np.concatenate([edge.y for edge in self.edges])
z = np.concatenate([edge.z for edge in self.edges])
return np.array(list(zip(x, y, z)))
def draw(self):
""" Draw all nodes and edges on 3D canvas.
"""
if not any([self.nodes, self.edges, self.planes]):
self._logger.warning("Attempted to draw in an empty canvas")
return
# Add nodes to canvas one by one.
for node in self.nodes:
self.axes.scatter(*node.coordinate,
zdir=node.zdir,
s=node.size,
c=node.color,
depthshade=node.depthshade,
edgecolor=node.edgecolor,
marker=node.style,
alpha=node.alpha,
linewidth=node.line_width,
zorder=node.zorder)
# Add edges to canvas.
for edge in self.edges:
self.axes.plot(edge.x, edge.y, edge.z,
zdir=edge.zdir,
linewidth=edge.width,
color=edge.color,
linestyle=edge.style,
alpha=edge.alpha,
zorder=edge.zorder)
# Add plane to canvas.
for plane in self.planes:
self.axes.plot_surface(plane.x, plane.y, plane.z,
facecolor=plane.color,
edgecolor=plane.edgecolor,
alpha=plane.alpha,
shade=plane.shade)
# Set axes limits.
limits = self._get_data_limits()
self.axes.set_xlim(limits.min_x, limits.max_x)
self.axes.set_ylim(limits.min_y, limits.max_y)
self.axes.set_zlim(limits.min_z, limits.max_z)
def clear(self):
""" Clear 3D axes.
"""
self.axes.clear()
def deep_clear(self):
""" Clear all components in canvas.
"""
self.clear()
self.nodes = []
self.edges = []
self.arrows = []
self.supercells = []
self.planes = []
def redraw(self):
""" Clear the canvas and draw all components agian.
"""
self.clear()
self.draw()
| mit |
stonebig/bokeh | bokeh/core/tests/test_json_encoder.py | 2 | 10746 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from collections import deque
import datetime as dt
import decimal
# External imports
import dateutil.relativedelta as rd
import numpy as np
from six import string_types
# Bokeh imports
from bokeh.colors import RGB
from bokeh.core.has_props import HasProps
from bokeh.core.properties import Int, String
from bokeh.models import Range1d
# Module under test
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class HP(HasProps):
foo = Int(default=10)
bar = String()
class TestBokehJSONEncoder(object):
def setup_method(self, test_method):
from bokeh.core.json_encoder import BokehJSONEncoder
self.encoder = BokehJSONEncoder()
def test_fail(self):
with pytest.raises(TypeError):
self.encoder.default({'testing': 1})
def test_panda_series(self, pd):
s = pd.Series([1, 3, 5, 6, 8])
assert self.encoder.default(s) == [1, 3, 5, 6, 8]
def test_numpyarray(self):
a = np.arange(5)
assert self.encoder.default(a) == [0, 1, 2, 3, 4]
def test_numpyint(self):
npint = np.asscalar(np.int64(1))
assert self.encoder.default(npint) == 1
assert isinstance(self.encoder.default(npint), int)
def test_numpyfloat(self):
npfloat = np.float64(1.33)
assert self.encoder.default(npfloat) == 1.33
assert isinstance(self.encoder.default(npfloat), float)
def test_numpybool_(self):
nptrue = np.bool_(True)
assert self.encoder.default(nptrue) == True
assert isinstance(self.encoder.default(nptrue), bool)
def test_numpydatetime64(self):
npdt64 = np.datetime64('2017-01-01')
assert self.encoder.default(npdt64) == 1483228800000.0
assert isinstance(self.encoder.default(npdt64), float)
def test_time(self):
dttime = dt.time(12, 32, 15)
assert self.encoder.default(dttime) == 45135000.0
assert isinstance(self.encoder.default(dttime), float)
def test_relativedelta(self):
rdelt = rd.relativedelta()
assert isinstance(self.encoder.default(rdelt), dict)
def test_decimal(self):
dec = decimal.Decimal(20.3)
assert self.encoder.default(dec) == 20.3
assert isinstance(self.encoder.default(dec), float)
def test_model(self):
m = Range1d(start=10, end=20)
assert self.encoder.default(m) == m.ref
assert isinstance(self.encoder.default(m), dict)
def test_hasprops(self):
hp = HP()
assert self.encoder.default(hp) == {}
assert isinstance(self.encoder.default(hp), dict)
hp.foo = 15
assert self.encoder.default(hp) == {'foo': 15}
assert isinstance(self.encoder.default(hp), dict)
hp.bar = "test"
assert self.encoder.default(hp) == {'foo': 15, 'bar': 'test'}
assert isinstance(self.encoder.default(hp), dict)
def test_color(self):
c = RGB(16, 32, 64)
assert self.encoder.default(c) == "rgb(16, 32, 64)"
assert isinstance(self.encoder.default(c), string_types)
c = RGB(16, 32, 64, 0.1)
assert self.encoder.default(c) == "rgba(16, 32, 64, 0.1)"
assert isinstance(self.encoder.default(c), string_types)
def test_slice(self):
c = slice(2)
assert self.encoder.default(c) == dict(start=None, stop=2, step=None)
assert isinstance(self.encoder.default(c), dict)
c = slice(0,2)
assert self.encoder.default(c) == dict(start=0, stop=2, step=None)
assert isinstance(self.encoder.default(c), dict)
c = slice(0, 10, 2)
assert self.encoder.default(c) == dict(start=0, stop=10, step=2)
assert isinstance(self.encoder.default(c), dict)
c = slice(0, None, 2)
assert self.encoder.default(c) == dict(start=0, stop=None, step=2)
assert isinstance(self.encoder.default(c), dict)
c = slice(None, None, None)
assert self.encoder.default(c) == dict(start=None, stop=None, step=None)
assert isinstance(self.encoder.default(c), dict)
def test_pd_timestamp(self, pd):
ts = pd.Timestamp('April 28, 1948')
assert self.encoder.default(ts) == -684115200000
class TestSerializeJson(object):
def setup_method(self, test_method):
from bokeh.core.json_encoder import serialize_json
from json import loads
self.serialize = serialize_json
self.deserialize = loads
def test_with_basic(self):
assert self.serialize({'test': [1, 2, 3]}) == '{"test":[1,2,3]}'
def test_pretty(self):
assert self.serialize({'test': [1, 2, 3]}, pretty=True) == '{\n "test": [\n 1,\n 2,\n 3\n ]\n}'
def test_with_np_array(self):
a = np.arange(5)
assert self.serialize(a) == '[0,1,2,3,4]'
def test_with_pd_series(self, pd):
s = pd.Series([0, 1, 2, 3, 4])
assert self.serialize(s) == '[0,1,2,3,4]'
def test_nans_and_infs(self):
arr = np.array([np.nan, np.inf, -np.inf, 0])
serialized = self.serialize(arr)
deserialized = self.deserialize(serialized)
assert deserialized[0] == 'NaN'
assert deserialized[1] == 'Infinity'
assert deserialized[2] == '-Infinity'
assert deserialized[3] == 0
def test_nans_and_infs_pandas(self, pd):
arr = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
serialized = self.serialize(arr)
deserialized = self.deserialize(serialized)
assert deserialized[0] == 'NaN'
assert deserialized[1] == 'Infinity'
assert deserialized[2] == '-Infinity'
assert deserialized[3] == 0
def test_pandas_datetime_types(self, pd):
""" should convert to millis """
idx = pd.date_range('2001-1-1', '2001-1-5')
df = pd.DataFrame({'vals' :idx}, index=idx)
serialized = self.serialize({'vals' : df.vals,
'idx' : df.index})
deserialized = self.deserialize(serialized)
baseline = {u'vals': [978307200000,
978393600000,
978480000000,
978566400000,
978652800000],
u'idx': [978307200000,
978393600000,
978480000000,
978566400000,
978652800000]
}
assert deserialized == baseline
def test_builtin_datetime_types(self):
""" should convert to millis as-is """
DT_EPOCH = dt.datetime.utcfromtimestamp(0)
a = dt.date(2016, 4, 28)
b = dt.datetime(2016, 4, 28, 2, 20, 50)
serialized = self.serialize({'a' : [a],
'b' : [b]})
deserialized = self.deserialize(serialized)
baseline = {u'a': [(dt.datetime(*a.timetuple()[:6]) - DT_EPOCH).total_seconds() * 1000],
u'b': [(b - DT_EPOCH).total_seconds() * 1000. + b.microsecond / 1000.],
}
assert deserialized == baseline
# test pre-computed values too
assert deserialized == {
u'a': [1461801600000.0], u'b': [1461810050000.0]
}
def test_builtin_timedelta_types(self):
""" should convert time delta to a dictionary """
delta = dt.timedelta(days=42, seconds=1138, microseconds=1337)
serialized = self.serialize(delta)
deserialized = self.deserialize(serialized)
assert deserialized == delta.total_seconds() * 1000
def test_numpy_timedelta_types(self):
delta = np.timedelta64(3000, 'ms')
serialized = self.serialize(delta)
deserialized = self.deserialize(serialized)
assert deserialized == 3000
delta = np.timedelta64(3000, 's')
serialized = self.serialize(delta)
deserialized = self.deserialize(serialized)
assert deserialized == 3000000
def test_pandas_timedelta_types(self, pd):
delta = pd.Timedelta("3000ms")
serialized = self.serialize(delta)
deserialized = self.deserialize(serialized)
assert deserialized == 3000
def test_deque(self):
"""Test that a deque is deserialized as a list."""
assert self.serialize(deque([0, 1, 2])) == '[0,1,2]'
def test_slice(self):
"""Test that a slice is deserialized as a list."""
assert self.serialize(slice(2)) == '{"start":null,"step":null,"stop":2}'
assert self.serialize(slice(0, 2)) == '{"start":0,"step":null,"stop":2}'
assert self.serialize(slice(0, 10, 2)) == '{"start":0,"step":2,"stop":10}'
assert self.serialize(slice(0, None, 2)) == '{"start":0,"step":2,"stop":null}'
assert self.serialize(slice(None, None, None)) == '{"start":null,"step":null,"stop":null}'
def test_bad_kwargs(self):
with pytest.raises(ValueError):
self.serialize([1], allow_nan=True)
with pytest.raises(ValueError):
self.serialize([1], separators=("a", "b"))
with pytest.raises(ValueError):
self.serialize([1], sort_keys=False)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
rohit21122012/DCASE2013 | runs/2016/dnn2016med_traps/traps26/task1_scene_classification.py | 40 | 38423 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# DCASE 2016::Acoustic Scene Classification / Baseline System
import argparse
import textwrap
import timeit
import skflow
from sklearn import mixture
from sklearn import preprocessing as pp
from sklearn.externals import joblib
from sklearn.metrics import confusion_matrix
from src.dataset import *
from src.evaluation import *
from src.features import *
__version_info__ = ('1', '0', '0')
__version__ = '.'.join(__version_info__)
final_result = {}
train_start = 0.0
train_end = 0.0
test_start = 0.0
test_end = 0.0
def main(argv):
numpy.random.seed(123456) # let's make randomization predictable
tot_start = timeit.default_timer()
parser = argparse.ArgumentParser(
prefix_chars='-+',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
DCASE 2016
Task 1: Acoustic Scene Classification
Baseline system
---------------------------------------------
Tampere University of Technology / Audio Research Group
Author: Toni Heittola ( [email protected] )
System description
This is a baseline implementation for the DCASE 2016 challenge acoustic scene classification task.
Features: MFCC (static+delta+acceleration)
Classifier: GMM
'''))
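# Hedged illustration (the actual extraction lives in src.features, which is not shown
# here): "MFCC (static+delta+acceleration)" usually means stacking the MFCC matrix with
# its first- and second-order deltas, e.g. with librosa:
#     mfccs = librosa.feature.mfcc(y=audio, sr=fs, n_mfcc=20)
#     feature_matrix = numpy.vstack((mfccs,
#                                    librosa.feature.delta(mfccs),
#                                    librosa.feature.delta(mfccs, order=2)))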
# Setup argument handling
parser.add_argument("-development", help="Use the system in the development mode", action='store_true',
default=False, dest='development')
parser.add_argument("-challenge", help="Use the system in the challenge mode", action='store_true',
default=False, dest='challenge')
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
args = parser.parse_args()
# Load parameters from config file
parameter_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.path.splitext(os.path.basename(__file__))[0] + '.yaml')
params = load_parameters(parameter_file)
params = process_parameters(params)
make_folders(params)
title("DCASE 2016::Acoustic Scene Classification / Baseline System")
# Check if mode is defined
if not (args.development or args.challenge):
args.development = True
args.challenge = False
dataset_evaluation_mode = 'folds'
if args.development and not args.challenge:
print "Running system in development mode"
dataset_evaluation_mode = 'folds'
elif not args.development and args.challenge:
print "Running system in challenge mode"
dataset_evaluation_mode = 'full'
# Get dataset container class
dataset = eval(params['general']['development_dataset'])(data_path=params['path']['data'])
# Fetch data over internet and setup the data
# ==================================================
if params['flow']['initialize']:
dataset.fetch()
# Extract features for all audio files in the dataset
# ==================================================
if params['flow']['extract_features']:
section_header('Feature extraction')
# Collect files in train sets
files = []
for fold in dataset.folds(mode=dataset_evaluation_mode):
for item_id, item in enumerate(dataset.train(fold)):
if item['file'] not in files:
files.append(item['file'])
for item_id, item in enumerate(dataset.test(fold)):
if item['file'] not in files:
files.append(item['file'])
files = sorted(files)
# Go through files and make sure all features are extracted
do_feature_extraction(files=files,
dataset=dataset,
feature_path=params['path']['features'],
params=params['features'],
overwrite=params['general']['overwrite'])
foot()
# Prepare feature normalizers
# ==================================================
if params['flow']['feature_normalizer']:
section_header('Feature normalizer')
do_feature_normalization(dataset=dataset,
feature_normalizer_path=params['path']['feature_normalizers'],
feature_path=params['path']['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
overwrite=params['general']['overwrite'])
foot()
# System training
# ==================================================
if params['flow']['train_system']:
section_header('System training')
train_start = timeit.default_timer()
do_system_training(dataset=dataset,
model_path=params['path']['models'],
feature_normalizer_path=params['path']['feature_normalizers'],
feature_path=params['path']['features'],
classifier_params=params['classifier']['parameters'],
classifier_method=params['classifier']['method'],
dataset_evaluation_mode=dataset_evaluation_mode,
overwrite=params['general']['overwrite']
)
train_end = timeit.default_timer()
foot()
# System evaluation in development mode
if args.development and not args.challenge:
# System testing
# ==================================================
if params['flow']['test_system']:
section_header('System testing')
test_start = timeit.default_timer()
do_system_testing(dataset=dataset,
feature_path=params['path']['features'],
result_path=params['path']['results'],
model_path=params['path']['models'],
feature_params=params['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=params['general']['overwrite']
)
test_end = timeit.default_timer()
foot()
# System evaluation
# ==================================================
if params['flow']['evaluate_system']:
section_header('System evaluation')
do_system_evaluation(dataset=dataset,
dataset_evaluation_mode=dataset_evaluation_mode,
result_path=params['path']['results'])
foot()
# System evaluation with challenge data
elif not args.development and args.challenge:
# Fetch data over internet and setup the data
challenge_dataset = eval(params['general']['challenge_dataset'])()
if params['flow']['initialize']:
challenge_dataset.fetch()
# System testing
if params['flow']['test_system']:
section_header('System testing with challenge data')
do_system_testing(dataset=challenge_dataset,
feature_path=params['path']['features'],
result_path=params['path']['challenge_results'],
model_path=params['path']['models'],
feature_params=params['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=True
)
foot()
print " "
print "Your results for the challenge data are stored at [" + params['path']['challenge_results'] + "]"
print " "
tot_end = timeit.default_timer()
print " "
print "Train Time : " + str(train_end - train_start)
print " "
print " "
print "Test Time : " + str(test_end - test_start)
print " "
print " "
print "Total Time : " + str(tot_end - tot_start)
print " "
final_result['train_time'] = train_end - train_start
final_result['test_time'] = test_end - test_start
final_result['tot_time'] = tot_end - tot_start
joblib.dump(final_result, 'result.pkl')
return 0
def process_parameters(params):
"""Parameter post-processing.
Parameters
----------
params : dict
parameters in dict
Returns
-------
params : dict
processed parameters
"""
# Convert feature extraction window and hop sizes seconds to samples
params['features']['mfcc']['win_length'] = int(params['features']['win_length_seconds'] * params['features']['fs'])
params['features']['mfcc']['hop_length'] = int(params['features']['hop_length_seconds'] * params['features']['fs'])
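    # Illustrative example (hypothetical values, not taken from the config):
    # win_length_seconds=0.04 at fs=44100 gives win_length = int(0.04 * 44100) = 1764
    # samples, and hop_length_seconds=0.02 gives hop_length = int(0.02 * 44100) = 882.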
# Copy parameters for current classifier method
params['classifier']['parameters'] = params['classifier_parameters'][params['classifier']['method']]
# Hash
params['features']['hash'] = get_parameter_hash(params['features'])
params['classifier']['hash'] = get_parameter_hash(params['classifier'])
# Paths
params['path']['data'] = os.path.join(os.path.dirname(os.path.realpath(__file__)), params['path']['data'])
params['path']['base'] = os.path.join(os.path.dirname(os.path.realpath(__file__)), params['path']['base'])
# Features
params['path']['features_'] = params['path']['features']
params['path']['features'] = os.path.join(params['path']['base'],
params['path']['features'],
params['features']['hash'])
# Feature normalizers
params['path']['feature_normalizers_'] = params['path']['feature_normalizers']
params['path']['feature_normalizers'] = os.path.join(params['path']['base'],
params['path']['feature_normalizers'],
params['features']['hash'])
# Models
params['path']['models_'] = params['path']['models']
params['path']['models'] = os.path.join(params['path']['base'],
params['path']['models'],
params['features']['hash'], params['classifier']['hash'])
# Results
params['path']['results_'] = params['path']['results']
params['path']['results'] = os.path.join(params['path']['base'],
params['path']['results'],
params['features']['hash'], params['classifier']['hash'])
return params
def make_folders(params, parameter_filename='parameters.yaml'):
"""Create all needed folders, and saves parameters in yaml-file for easier manual browsing of data.
Parameters
----------
params : dict
parameters in dict
parameter_filename : str
filename to save parameters used to generate the folder name
Returns
-------
nothing
"""
# Check that target path exists, create if not
check_path(params['path']['features'])
check_path(params['path']['feature_normalizers'])
check_path(params['path']['models'])
check_path(params['path']['results'])
# Save parameters into folders to help manual browsing of files.
# Features
feature_parameter_filename = os.path.join(params['path']['features'], parameter_filename)
if not os.path.isfile(feature_parameter_filename):
save_parameters(feature_parameter_filename, params['features'])
# Feature normalizers
feature_normalizer_parameter_filename = os.path.join(params['path']['feature_normalizers'], parameter_filename)
if not os.path.isfile(feature_normalizer_parameter_filename):
save_parameters(feature_normalizer_parameter_filename, params['features'])
# Models
model_features_parameter_filename = os.path.join(params['path']['base'],
params['path']['models_'],
params['features']['hash'],
parameter_filename)
if not os.path.isfile(model_features_parameter_filename):
save_parameters(model_features_parameter_filename, params['features'])
model_models_parameter_filename = os.path.join(params['path']['base'],
params['path']['models_'],
params['features']['hash'],
params['classifier']['hash'],
parameter_filename)
if not os.path.isfile(model_models_parameter_filename):
save_parameters(model_models_parameter_filename, params['classifier'])
# Results
# Save parameters into folders to help manual browsing of files.
result_features_parameter_filename = os.path.join(params['path']['base'],
params['path']['results_'],
params['features']['hash'],
parameter_filename)
if not os.path.isfile(result_features_parameter_filename):
save_parameters(result_features_parameter_filename, params['features'])
result_models_parameter_filename = os.path.join(params['path']['base'],
params['path']['results_'],
params['features']['hash'],
params['classifier']['hash'],
parameter_filename)
if not os.path.isfile(result_models_parameter_filename):
save_parameters(result_models_parameter_filename, params['classifier'])
def get_feature_filename(audio_file, path, extension='cpickle'):
"""Get feature filename
Parameters
----------
audio_file : str
audio file name from which the features are extracted
path : str
feature path
extension : str
file extension
(Default value='cpickle')
Returns
-------
feature_filename : str
full feature filename
"""
audio_filename = os.path.split(audio_file)[1]
return os.path.join(path, os.path.splitext(audio_filename)[0] + '.' + extension)
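# Illustrative usage of the helper above (hypothetical paths):
#   get_feature_filename('audio/a001_10_20.wav', '/data/features')
#   -> '/data/features/a001_10_20.cpickle'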
def get_feature_normalizer_filename(fold, path, extension='cpickle'):
"""Get normalizer filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
normalizer path
extension : str
file extension
(Default value='cpickle')
Returns
-------
normalizer_filename : str
full normalizer filename
"""
return os.path.join(path, 'scale_fold' + str(fold) + '.' + extension)
def get_model_filename(fold, path, extension='cpickle'):
"""Get model filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
model path
extension : str
file extension
(Default value='cpickle')
Returns
-------
model_filename : str
full model filename
"""
return os.path.join(path, 'model_fold' + str(fold) + '.' + extension)
def get_result_filename(fold, path, extension='txt'):
"""Get result filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
result path
extension : str
file extension
        (Default value='txt')
Returns
-------
result_filename : str
full result filename
"""
if fold == 0:
return os.path.join(path, 'results.' + extension)
else:
return os.path.join(path, 'results_fold' + str(fold) + '.' + extension)
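# Illustrative usage of the helper above (hypothetical path):
#   get_result_filename(fold=0, path='/data/results') -> '/data/results/results.txt'
#   get_result_filename(fold=2, path='/data/results') -> '/data/results/results_fold2.txt'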
def do_feature_extraction(files, dataset, feature_path, params, overwrite=False):
"""Feature extraction
Parameters
----------
files : list
file list
dataset : class
dataset class
feature_path : str
path where the features are saved
params : dict
parameter dict
overwrite : bool
overwrite existing feature files
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Audio file not found.
"""
# Check that target path exists, create if not
check_path(feature_path)
for file_id, audio_filename in enumerate(files):
# Get feature filename
current_feature_file = get_feature_filename(audio_file=os.path.split(audio_filename)[1], path=feature_path)
progress(title_text='Extracting',
percentage=(float(file_id) / len(files)),
note=os.path.split(audio_filename)[1])
if not os.path.isfile(current_feature_file) or overwrite:
# Load audio data
if os.path.isfile(dataset.relative_to_absolute_path(audio_filename)):
y, fs = load_audio(filename=dataset.relative_to_absolute_path(audio_filename), mono=True,
fs=params['fs'])
else:
raise IOError("Audio file not found [%s]" % audio_filename)
# Extract features
if params['method'] == 'lfcc':
feature_file_txt = get_feature_filename(audio_file=os.path.split(audio_filename)[1],
path=feature_path,
extension='txt')
feature_data = feature_extraction_lfcc(feature_file_txt)
elif params['method'] == 'traps':
feature_data = feature_extraction_traps(y=y,
fs=fs,
traps_params=params['traps'],
mfcc_params=params['mfcc'])
else:
# feature_data['feat'].shape is (1501, 60)
feature_data = feature_extraction(y=y,
fs=fs,
include_mfcc0=params['include_mfcc0'],
include_delta=params['include_delta'],
include_acceleration=params['include_acceleration'],
mfcc_params=params['mfcc'],
delta_params=params['mfcc_delta'],
acceleration_params=params['mfcc_acceleration'])
# Save
save_data(current_feature_file, feature_data)
def do_feature_normalization(dataset, feature_normalizer_path, feature_path, dataset_evaluation_mode='folds',
overwrite=False):
"""Feature normalization
    Calculates normalization factors for each evaluation fold based on the available training material.
Parameters
----------
dataset : class
dataset class
feature_normalizer_path : str
path where the feature normalizers are saved.
feature_path : str
path where the features are saved.
dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode; with 'full', all available material is treated as a single fold.
(Default value='folds')
overwrite : bool
overwrite existing normalizers
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Feature file not found.
"""
# Check that target path exists, create if not
check_path(feature_normalizer_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_normalizer_file = get_feature_normalizer_filename(fold=fold, path=feature_normalizer_path)
if not os.path.isfile(current_normalizer_file) or overwrite:
# Initialize statistics
file_count = len(dataset.train(fold))
normalizer = FeatureNormalizer()
for item_id, item in enumerate(dataset.train(fold)):
progress(title_text='Collecting data',
fold=fold,
percentage=(float(item_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
if os.path.isfile(get_feature_filename(audio_file=item['file'], path=feature_path)):
feature_data = load_data(get_feature_filename(audio_file=item['file'], path=feature_path))['stat']
else:
raise IOError("Feature file not found [%s]" % (item['file']))
# Accumulate statistics
normalizer.accumulate(feature_data)
# Calculate normalization factors
normalizer.finalize()
# Save
save_data(current_normalizer_file, normalizer)
def do_system_training(dataset, model_path, feature_normalizer_path, feature_path, classifier_params,
dataset_evaluation_mode='folds', classifier_method='gmm', overwrite=False):
"""System training
model container format:
{
'normalizer': normalizer class
'models' :
{
'office' : mixture.GMM class
'home' : mixture.GMM class
...
}
}
Parameters
----------
dataset : class
dataset class
model_path : str
path where the models are saved.
feature_normalizer_path : str
path where the feature normalizers are saved.
feature_path : str
path where the features are saved.
classifier_params : dict
parameter dict
dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode; with 'full', all available material is treated as a single fold.
        (Default value='folds')
    classifier_method : str ['gmm', 'dnn']
        classifier method, 'gmm' and 'dnn' are supported
(Default value='gmm')
overwrite : bool
overwrite existing models
(Default value=False)
Returns
-------
nothing
Raises
-------
ValueError
classifier_method is unknown.
IOError
Feature normalizer not found.
Feature file not found.
"""
if classifier_method != 'gmm' and classifier_method != 'dnn':
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Check that target path exists, create if not
check_path(model_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_model_file = get_model_filename(fold=fold, path=model_path)
if not os.path.isfile(current_model_file) or overwrite:
# Load normalizer
feature_normalizer_filename = get_feature_normalizer_filename(fold=fold, path=feature_normalizer_path)
if os.path.isfile(feature_normalizer_filename):
normalizer = load_data(feature_normalizer_filename)
else:
raise IOError("Feature normalizer not found [%s]" % feature_normalizer_filename)
# Initialize model container
model_container = {'normalizer': normalizer, 'models': {}}
# Collect training examples
file_count = len(dataset.train(fold))
data = {}
for item_id, item in enumerate(dataset.train(fold)):
progress(title_text='Collecting data',
fold=fold,
percentage=(float(item_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['feat']
else:
raise IOError("Features not found [%s]" % (item['file']))
# Scale features
feature_data = model_container['normalizer'].normalize(feature_data)
# Store features per class label
if item['scene_label'] not in data:
data[item['scene_label']] = feature_data
else:
data[item['scene_label']] = numpy.vstack((data[item['scene_label']], feature_data))
le = pp.LabelEncoder()
tot_data = {}
# Train models for each class
for label in data:
progress(title_text='Train models',
fold=fold,
note=label)
if classifier_method == 'gmm':
model_container['models'][label] = mixture.GMM(**classifier_params).fit(data[label])
elif classifier_method == 'dnn':
if 'x' not in tot_data:
tot_data['x'] = data[label]
tot_data['y'] = numpy.repeat(label, len(data[label]), axis=0)
else:
tot_data['x'] = numpy.vstack((tot_data['x'], data[label]))
tot_data['y'] = numpy.hstack((tot_data['y'], numpy.repeat(label, len(data[label]), axis=0)))
else:
raise ValueError("Unknown classifier method [" + classifier_method + "]")
            if classifier_method == 'dnn':
                # Instantiate the DNN classifier only when it is actually used;
                # constructing it with GMM parameters would fail. Labels are
                # integer-encoded before fitting and the trained model is stored
                # separately on disk for the testing stage.
                clf = skflow.TensorFlowDNNClassifier(**classifier_params)
                tot_data['y'] = le.fit_transform(tot_data['y'])
                clf.fit(tot_data['x'], tot_data['y'])
                clf.save('dnn/dnnmodel1')
# Save models
save_data(current_model_file, model_container)
def do_system_testing(dataset, result_path, feature_path, model_path, feature_params,
dataset_evaluation_mode='folds', classifier_method='gmm', overwrite=False):
"""System testing.
    If extracted features are not found on disk, they are extracted but not saved.
Parameters
----------
dataset : class
dataset class
result_path : str
path where the results are saved.
feature_path : str
path where the features are saved.
model_path : str
path where the models are saved.
feature_params : dict
parameter dict
dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode; with 'full', all available material is treated as a single fold.
        (Default value='folds')
    classifier_method : str ['gmm', 'dnn']
        classifier method, 'gmm' and 'dnn' are supported
(Default value='gmm')
overwrite : bool
overwrite existing models
(Default value=False)
Returns
-------
nothing
Raises
-------
ValueError
classifier_method is unknown.
IOError
Model file not found.
Audio file not found.
"""
if classifier_method != 'gmm' and classifier_method != 'dnn':
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Check that target path exists, create if not
check_path(result_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_result_file = get_result_filename(fold=fold, path=result_path)
if not os.path.isfile(current_result_file) or overwrite:
results = []
# Load class model container
model_filename = get_model_filename(fold=fold, path=model_path)
if os.path.isfile(model_filename):
model_container = load_data(model_filename)
else:
raise IOError("Model file not found [%s]" % model_filename)
file_count = len(dataset.test(fold))
for file_id, item in enumerate(dataset.test(fold)):
progress(title_text='Testing',
fold=fold,
percentage=(float(file_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['feat']
else:
# Load audio
if os.path.isfile(dataset.relative_to_absolute_path(item['file'])):
y, fs = load_audio(filename=dataset.relative_to_absolute_path(item['file']), mono=True,
fs=feature_params['fs'])
else:
raise IOError("Audio file not found [%s]" % (item['file']))
if feature_params['method'] == 'lfcc':
feature_file_txt = get_feature_filename(audio_file=os.path.split(item['file'])[1],
path=feature_path,
extension='txt')
feature_data = feature_extraction_lfcc(feature_file_txt)
elif feature_params['method'] == 'traps':
feature_data = feature_extraction_traps(y=y,
fs=fs,
                                                                traps_params=feature_params['traps'],
mfcc_params=feature_params['mfcc'],
statistics=False)['feat']
else:
feature_data = feature_extraction(y=y,
fs=fs,
include_mfcc0=feature_params['include_mfcc0'],
include_delta=feature_params['include_delta'],
include_acceleration=feature_params['include_acceleration'],
mfcc_params=feature_params['mfcc'],
delta_params=feature_params['mfcc_delta'],
acceleration_params=feature_params['mfcc_acceleration'],
statistics=False)['feat']
# Normalize features
feature_data = model_container['normalizer'].normalize(feature_data)
# Do classification for the block
if classifier_method == 'gmm':
current_result = do_classification_gmm(feature_data, model_container)
current_class = current_result['class']
elif classifier_method == 'dnn':
current_result = do_classification_dnn(feature_data, model_container)
current_class = dataset.scene_labels[current_result['class_id']]
else:
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Store the result
if classifier_method == 'gmm':
results.append((dataset.absolute_to_relative(item['file']),
current_class))
elif classifier_method == 'dnn':
logs_in_tuple = tuple(lo for lo in current_result['logls'])
results.append((dataset.absolute_to_relative(item['file']),
current_class) + logs_in_tuple)
else:
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Save testing results
with open(current_result_file, 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for result_item in results:
writer.writerow(result_item)
def do_classification_dnn(feature_data, model_container):
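    """DNN classification for a given feature matrix.
    Frame-wise class posteriors are obtained from the skflow model restored
    from 'dnn/dnnmodel1', the log-probabilities are summed over frames, and
    the class with the highest accumulated log-likelihood is selected. The
    initial vector of length 15 appears to assume the 15 scene classes of the
    DCASE 2016 task; it is overwritten by the computed log-likelihoods.
    Parameters
    ----------
    feature_data : numpy.ndarray [shape=(t, feature vector length)]
        feature matrix
    model_container : dict
        model container (not used here; kept for symmetry with the GMM path)
    Returns
    -------
    result : dict
        'class_id' (int) and 'logls' (numpy.ndarray of per-class log-likelihoods)
    """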
# Initialize log-likelihood matrix to -inf
logls = numpy.empty(15)
logls.fill(-numpy.inf)
model_clf = skflow.TensorFlowEstimator.restore('dnn/dnnmodel1')
logls = numpy.sum(numpy.log(model_clf.predict_proba(feature_data)), 0)
classification_result_id = numpy.argmax(logls)
return {'class_id': classification_result_id,
'logls': logls}
def do_classification_gmm(feature_data, model_container):
"""GMM classification for give feature matrix
model container format:
{
'normalizer': normalizer class
'models' :
{
'office' : mixture.GMM class
'home' : mixture.GMM class
...
}
}
Parameters
----------
feature_data : numpy.ndarray [shape=(t, feature vector length)]
feature matrix
model_container : dict
model container
Returns
-------
result : str
classification result as scene label
"""
# Initialize log-likelihood matrix to -inf
logls = numpy.empty(len(model_container['models']))
logls.fill(-numpy.inf)
for label_id, label in enumerate(model_container['models']):
logls[label_id] = numpy.sum(model_container['models'][label].score(feature_data))
classification_result_id = numpy.argmax(logls)
return {'class': model_container['models'].keys()[classification_result_id],
'logls': logls}
def do_system_evaluation(dataset, result_path, dataset_evaluation_mode='folds'):
"""System evaluation. Testing outputs are collected and evaluated. Evaluation results are printed.
Parameters
----------
dataset : class
dataset class
result_path : str
path where the results are saved.
dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode; with 'full', all available material is treated as a single fold.
(Default value='folds')
Returns
-------
nothing
Raises
-------
IOError
Result file not found
"""
dcase2016_scene_metric = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
results_fold = []
tot_cm = numpy.zeros((dataset.scene_label_count, dataset.scene_label_count))
for fold in dataset.folds(mode=dataset_evaluation_mode):
dcase2016_scene_metric_fold = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
results = []
result_filename = get_result_filename(fold=fold, path=result_path)
if os.path.isfile(result_filename):
with open(result_filename, 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
results.append(row)
else:
raise IOError("Result file not found [%s]" % result_filename)
# Rewrite the result file
if os.path.isfile(result_filename):
with open(result_filename+'2', 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for result_item in results:
y_true = (dataset.file_meta(result_item[0])[0]['scene_label'],)
#print type(y_true)
#print type(result_item)
writer.writerow(y_true + tuple(result_item))
y_true = []
y_pred = []
for result in results:
y_true.append(dataset.file_meta(result[0])[0]['scene_label'])
y_pred.append(result[1])
dcase2016_scene_metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
dcase2016_scene_metric_fold.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
results_fold.append(dcase2016_scene_metric_fold.results())
tot_cm += confusion_matrix(y_true, y_pred)
final_result['tot_cm'] = tot_cm
final_result['tot_cm_acc'] = numpy.sum(numpy.diag(tot_cm)) / numpy.sum(tot_cm)
results = dcase2016_scene_metric.results()
final_result['result'] = results
print " File-wise evaluation, over %d folds" % dataset.fold_count
fold_labels = ''
separator = ' =====================+======+======+==========+ +'
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_labels += " {:8s} |".format('Fold' + str(fold))
separator += "==========+"
print " {:20s} | {:4s} : {:4s} | {:8s} | |".format('Scene label', 'Nref', 'Nsys', 'Accuracy') + fold_labels
print separator
for label_id, label in enumerate(sorted(results['class_wise_accuracy'])):
fold_values = ''
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_values += " {:5.1f} % |".format(results_fold[fold - 1]['class_wise_accuracy'][label] * 100)
print " {:20s} | {:4d} : {:4d} | {:5.1f} % | |".format(label,
results['class_wise_data'][label]['Nref'],
results['class_wise_data'][label]['Nsys'],
results['class_wise_accuracy'][
label] * 100) + fold_values
print separator
fold_values = ''
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_values += " {:5.1f} % |".format(results_fold[fold - 1]['overall_accuracy'] * 100)
print " {:20s} | {:4d} : {:4d} | {:5.1f} % | |".format('Overall accuracy',
results['Nref'],
results['Nsys'],
results['overall_accuracy'] * 100) + fold_values
if __name__ == "__main__":
try:
sys.exit(main(sys.argv))
except (ValueError, IOError) as e:
sys.exit(e)
| mit |
yl565/statsmodels | statsmodels/graphics/tests/test_mosaicplot.py | 3 | 19198 | from __future__ import division
from statsmodels.compat.python import iterkeys, zip, lrange, iteritems, range
from numpy.testing import assert_, assert_raises, dec
from numpy.testing import run_module_suite
# utilities for the tests
from statsmodels.compat.pandas import sort_values
from statsmodels.compat.collections import OrderedDict
from statsmodels.api import datasets
import numpy as np
from itertools import product
try:
import matplotlib.pyplot as pylab
have_matplotlib = True
except ImportError:
have_matplotlib = False
from statsmodels.compat.pandas import version as pandas_version
pandas_old = pandas_version < '0.9'
# the main drawing function
from statsmodels.graphics.mosaicplot import mosaic
# other functions to be tested for accuracy
from statsmodels.graphics.mosaicplot import _hierarchical_split
from statsmodels.graphics.mosaicplot import _reduce_dict
from statsmodels.graphics.mosaicplot import _key_splitting
from statsmodels.graphics.mosaicplot import _normalize_split
from statsmodels.graphics.mosaicplot import _split_rect
@dec.skipif(not have_matplotlib or pandas_old)
def test_data_conversion():
# It will not reorder the elements
# so the dictionary will look odd
    # as its key order has the c and b
# keys swapped
import pandas
fig, ax = pylab.subplots(4, 4)
data = {'ax': 1, 'bx': 2, 'cx': 3}
mosaic(data, ax=ax[0, 0], title='basic dict', axes_label=False)
data = pandas.Series(data)
mosaic(data, ax=ax[0, 1], title='basic series', axes_label=False)
data = [1, 2, 3]
mosaic(data, ax=ax[0, 2], title='basic list', axes_label=False)
data = np.asarray(data)
mosaic(data, ax=ax[0, 3], title='basic array', axes_label=False)
data = {('ax', 'cx'): 1, ('bx', 'cx'): 2, ('ax', 'dx'): 3, ('bx', 'dx'): 4}
mosaic(data, ax=ax[1, 0], title='compound dict', axes_label=False)
mosaic(data, ax=ax[2, 0], title='inverted keys dict', index=[1, 0], axes_label=False)
data = pandas.Series(data)
mosaic(data, ax=ax[1, 1], title='compound series', axes_label=False)
mosaic(data, ax=ax[2, 1], title='inverted keys series', index=[1, 0])
data = [[1, 2], [3, 4]]
mosaic(data, ax=ax[1, 2], title='compound list', axes_label=False)
mosaic(data, ax=ax[2, 2], title='inverted keys list', index=[1, 0])
data = np.array([[1, 2], [3, 4]])
mosaic(data, ax=ax[1, 3], title='compound array', axes_label=False)
mosaic(data, ax=ax[2, 3], title='inverted keys array', index=[1, 0], axes_label=False)
gender = ['male', 'male', 'male', 'female', 'female', 'female']
pet = ['cat', 'dog', 'dog', 'cat', 'dog', 'cat']
data = pandas.DataFrame({'gender': gender, 'pet': pet})
mosaic(data, ['gender'], ax=ax[3, 0], title='dataframe by key 1', axes_label=False)
mosaic(data, ['pet'], ax=ax[3, 1], title='dataframe by key 2', axes_label=False)
mosaic(data, ['gender', 'pet'], ax=ax[3, 2], title='both keys', axes_label=False)
mosaic(data, ['pet', 'gender'], ax=ax[3, 3], title='keys inverted', axes_label=False)
pylab.suptitle('testing data conversion (plot 1 of 4)')
#pylab.show()
pylab.close('all')
@dec.skipif(not have_matplotlib)
def test_mosaic_simple():
    # display a simple plot of 4 categories of data, split into four
# levels with increasing size for each group
# creation of the levels
key_set = (['male', 'female'], ['old', 'adult', 'young'],
['worker', 'unemployed'], ['healty', 'ill'])
# the cartesian product of all the categories is
# the complete set of categories
keys = list(product(*key_set))
data = OrderedDict(zip(keys, range(1, 1 + len(keys))))
# which colours should I use for the various categories?
# put it into a dict
props = {}
#males and females in blue and red
props[('male',)] = {'color': 'b'}
props[('female',)] = {'color': 'r'}
# all the groups corresponding to ill groups have a different color
for key in keys:
if 'ill' in key:
if 'male' in key:
props[key] = {'color': 'BlueViolet' , 'hatch': '+'}
else:
props[key] = {'color': 'Crimson' , 'hatch': '+'}
# mosaic of the data, with given gaps and colors
mosaic(data, gap=0.05, properties=props, axes_label=False)
    pylab.suptitle('synthetic data, 4 categories (plot 2 of 4)')
#pylab.show()
pylab.close('all')
@dec.skipif(not have_matplotlib or pandas_old)
def test_mosaic():
# make the same analysis on a known dataset
# load the data and clean it a bit
affairs = datasets.fair.load_pandas()
datas = affairs.exog
# any time greater than 0 is cheating
datas['cheated'] = affairs.endog > 0
# sort by the marriage quality and give meaningful name
# [rate_marriage, age, yrs_married, children,
# religious, educ, occupation, occupation_husb]
datas = sort_values(datas, ['rate_marriage', 'religious'])
num_to_desc = {1: 'awful', 2: 'bad', 3: 'intermediate',
4: 'good', 5: 'wonderful'}
datas['rate_marriage'] = datas['rate_marriage'].map(num_to_desc)
num_to_faith = {1: 'non religious', 2: 'poorly religious', 3: 'religious',
4: 'very religious'}
datas['religious'] = datas['religious'].map(num_to_faith)
num_to_cheat = {False: 'faithful', True: 'cheated'}
datas['cheated'] = datas['cheated'].map(num_to_cheat)
# finished cleaning
fig, ax = pylab.subplots(2, 2)
mosaic(datas, ['rate_marriage', 'cheated'], ax=ax[0, 0],
title='by marriage happiness')
mosaic(datas, ['religious', 'cheated'], ax=ax[0, 1],
title='by religiosity')
mosaic(datas, ['rate_marriage', 'religious', 'cheated'], ax=ax[1, 0],
title='by both', labelizer=lambda k:'')
ax[1, 0].set_xlabel('marriage rating')
ax[1, 0].set_ylabel('religion status')
mosaic(datas, ['religious', 'rate_marriage'], ax=ax[1, 1],
title='inter-dependence', axes_label=False)
pylab.suptitle("extramarital affairs (plot 3 of 4)")
#pylab.show()
pylab.close('all')
@dec.skipif(not have_matplotlib)
def test_mosaic_very_complex():
# make a scattermatrix of mosaic plots to show the correlations between
# each pair of variable in a dataset. Could be easily converted into a
# new function that does this automatically based on the type of data
key_name = ['gender', 'age', 'health', 'work']
key_base = (['male', 'female'], ['old', 'young'],
['healty', 'ill'], ['work', 'unemployed'])
keys = list(product(*key_base))
data = OrderedDict(zip(keys, range(1, 1 + len(keys))))
props = {}
props[('male', 'old')] = {'color': 'r'}
props[('female',)] = {'color': 'pink'}
L = len(key_base)
fig, axes = pylab.subplots(L, L)
for i in range(L):
for j in range(L):
m = set(range(L)).difference(set((i, j)))
if i == j:
axes[i, i].text(0.5, 0.5, key_name[i],
ha='center', va='center')
axes[i, i].set_xticks([])
axes[i, i].set_xticklabels([])
axes[i, i].set_yticks([])
axes[i, i].set_yticklabels([])
else:
ji = max(i, j)
ij = min(i, j)
temp_data = OrderedDict([((k[ij], k[ji]) + tuple(k[r] for r in m), v)
for k, v in iteritems(data)])
keys = list(iterkeys(temp_data))
for k in keys:
value = _reduce_dict(temp_data, k[:2])
temp_data[k[:2]] = value
del temp_data[k]
mosaic(temp_data, ax=axes[i, j], axes_label=False,
properties=props, gap=0.05, horizontal=i > j)
pylab.suptitle('old males should look bright red, (plot 4 of 4)')
#pylab.show()
pylab.close('all')
@dec.skipif(not have_matplotlib)
def test_axes_labeling():
from numpy.random import rand
key_set = (['male', 'female'], ['old', 'adult', 'young'],
['worker', 'unemployed'], ['yes', 'no'])
# the cartesian product of all the categories is
# the complete set of categories
keys = list(product(*key_set))
data = OrderedDict(zip(keys, rand(len(keys))))
lab = lambda k: ''.join(s[0] for s in k)
fig, (ax1, ax2) = pylab.subplots(1, 2, figsize=(16, 8))
mosaic(data, ax=ax1, labelizer=lab, horizontal=True, label_rotation=45)
mosaic(data, ax=ax2, labelizer=lab, horizontal=False,
label_rotation=[0, 45, 90, 0])
#fig.tight_layout()
fig.suptitle("correct alignment of the axes labels")
#pylab.show()
pylab.close('all')
@dec.skipif(not have_matplotlib or pandas_old)
def test_mosaic_empty_cells():
# SMOKE test see #2286
import pandas as pd
mydata = pd.DataFrame({'id2': {64: 'Angelica',
65: 'DXW_UID', 66: 'casuid01',
67: 'casuid01', 68: 'EC93_uid',
69: 'EC93_uid', 70: 'EC93_uid',
60: 'DXW_UID', 61: 'AtmosFox',
62: 'DXW_UID', 63: 'DXW_UID'},
'id1': {64: 'TGP',
65: 'Retention01', 66: 'default',
67: 'default', 68: 'Musa_EC_9_3',
69: 'Musa_EC_9_3', 70: 'Musa_EC_9_3',
60: 'default', 61: 'default',
62: 'default', 63: 'default'}})
ct = pd.crosstab(mydata.id1, mydata.id2)
fig, vals = mosaic(ct.T.unstack())
pylab.close('all')
fig, vals = mosaic(mydata, ['id1','id2'])
pylab.close('all')
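# Small helper used by the geometry tests below: assert that two array-like
# results are numerically close.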
eq = lambda x, y: assert_(np.allclose(x, y))
def test_recursive_split():
keys = list(product('mf'))
data = OrderedDict(zip(keys, [1] * len(keys)))
res = _hierarchical_split(data, gap=0)
assert_(list(iterkeys(res)) == keys)
res[('m',)] = (0.0, 0.0, 0.5, 1.0)
res[('f',)] = (0.5, 0.0, 0.5, 1.0)
keys = list(product('mf', 'yao'))
data = OrderedDict(zip(keys, [1] * len(keys)))
res = _hierarchical_split(data, gap=0)
assert_(list(iterkeys(res)) == keys)
res[('m', 'y')] = (0.0, 0.0, 0.5, 1 / 3)
res[('m', 'a')] = (0.0, 1 / 3, 0.5, 1 / 3)
res[('m', 'o')] = (0.0, 2 / 3, 0.5, 1 / 3)
res[('f', 'y')] = (0.5, 0.0, 0.5, 1 / 3)
res[('f', 'a')] = (0.5, 1 / 3, 0.5, 1 / 3)
res[('f', 'o')] = (0.5, 2 / 3, 0.5, 1 / 3)
def test__reduce_dict():
data = OrderedDict(zip(list(product('mf', 'oy', 'wn')), [1] * 8))
eq(_reduce_dict(data, ('m',)), 4)
eq(_reduce_dict(data, ('m', 'o')), 2)
eq(_reduce_dict(data, ('m', 'o', 'w')), 1)
data = OrderedDict(zip(list(product('mf', 'oy', 'wn')), lrange(8)))
eq(_reduce_dict(data, ('m',)), 6)
eq(_reduce_dict(data, ('m', 'o')), 1)
eq(_reduce_dict(data, ('m', 'o', 'w')), 0)
def test__key_splitting():
# subdivide starting with an empty tuple
base_rect = {tuple(): (0, 0, 1, 1)}
res = _key_splitting(base_rect, ['a', 'b'], [1, 1], tuple(), True, 0)
assert_(list(iterkeys(res)) == [('a',), ('b',)])
eq(res[('a',)], (0, 0, 0.5, 1))
eq(res[('b',)], (0.5, 0, 0.5, 1))
# subdivide a in two sublevel
res_bis = _key_splitting(res, ['c', 'd'], [1, 1], ('a',), False, 0)
assert_(list(iterkeys(res_bis)) == [('a', 'c'), ('a', 'd'), ('b',)])
eq(res_bis[('a', 'c')], (0.0, 0.0, 0.5, 0.5))
eq(res_bis[('a', 'd')], (0.0, 0.5, 0.5, 0.5))
eq(res_bis[('b',)], (0.5, 0, 0.5, 1))
# starting with a non empty tuple and uneven distribution
base_rect = {('total',): (0, 0, 1, 1)}
res = _key_splitting(base_rect, ['a', 'b'], [1, 2], ('total',), True, 0)
assert_(list(iterkeys(res)) == [('total',) + (e,) for e in ['a', 'b']])
eq(res[('total', 'a')], (0, 0, 1 / 3, 1))
eq(res[('total', 'b')], (1 / 3, 0, 2 / 3, 1))
def test_proportion_normalization():
# extremes should give the whole set, as well
# as if 0 is inserted
eq(_normalize_split(0.), [0.0, 0.0, 1.0])
eq(_normalize_split(1.), [0.0, 1.0, 1.0])
eq(_normalize_split(2.), [0.0, 1.0, 1.0])
# negative values should raise ValueError
assert_raises(ValueError, _normalize_split, -1)
assert_raises(ValueError, _normalize_split, [1., -1])
assert_raises(ValueError, _normalize_split, [1., -1, 0.])
# if everything is zero it will complain
assert_raises(ValueError, _normalize_split, [0.])
assert_raises(ValueError, _normalize_split, [0., 0.])
# one-element array should return the whole interval
eq(_normalize_split([0.5]), [0.0, 1.0])
eq(_normalize_split([1.]), [0.0, 1.0])
eq(_normalize_split([2.]), [0.0, 1.0])
# simple division should give two pieces
for x in [0.3, 0.5, 0.9]:
eq(_normalize_split(x), [0., x, 1.0])
# multiple division should split as the sum of the components
for x, y in [(0.25, 0.5), (0.1, 0.8), (10., 30.)]:
eq(_normalize_split([x, y]), [0., x / (x + y), 1.0])
for x, y, z in [(1., 1., 1.), (0.1, 0.5, 0.7), (10., 30., 40)]:
eq(_normalize_split(
[x, y, z]), [0., x / (x + y + z), (x + y) / (x + y + z), 1.0])
def test_false_split():
# if you ask it to be divided in only one piece, just return the original
# one
pure_square = [0., 0., 1., 1.]
conf_h = dict(proportion=[1], gap=0.0, horizontal=True)
conf_v = dict(proportion=[1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_h), pure_square)
eq(_split_rect(*pure_square, **conf_v), pure_square)
conf_h = dict(proportion=[1], gap=0.5, horizontal=True)
conf_v = dict(proportion=[1], gap=0.5, horizontal=False)
eq(_split_rect(*pure_square, **conf_h), pure_square)
eq(_split_rect(*pure_square, **conf_v), pure_square)
# identity on a void rectangle should not give anything strange
null_square = [0., 0., 0., 0.]
conf = dict(proportion=[1], gap=0.0, horizontal=True)
eq(_split_rect(*null_square, **conf), null_square)
conf = dict(proportion=[1], gap=1.0, horizontal=True)
eq(_split_rect(*null_square, **conf), null_square)
# splitting a negative rectangle should raise error
neg_square = [0., 0., -1., 0.]
conf = dict(proportion=[1], gap=0.0, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1, 1], gap=0.0, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1], gap=0.5, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1, 1], gap=0.5, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
def test_rect_pure_split():
pure_square = [0., 0., 1., 1.]
# division in two equal pieces from the perfect square
h_2split = [(0.0, 0.0, 0.5, 1.0), (0.5, 0.0, 0.5, 1.0)]
conf_h = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 0.5), (0.0, 0.5, 1.0, 0.5)]
conf_v = dict(proportion=[1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in two non-equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 3, 1.0), (1 / 3, 0.0, 2 / 3, 1.0)]
conf_h = dict(proportion=[1, 2], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 3), (0.0, 1 / 3, 1.0, 2 / 3)]
conf_v = dict(proportion=[1, 2], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in three equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 3, 1.0), (1 / 3, 0.0, 1 / 3, 1.0), (2 / 3, 0.0,
1 / 3, 1.0)]
conf_h = dict(proportion=[1, 1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 3), (0.0, 1 / 3, 1.0, 1 / 3), (0.0, 2 / 3,
1.0, 1 / 3)]
conf_v = dict(proportion=[1, 1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in three non-equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 4, 1.0), (1 / 4, 0.0, 1 / 2, 1.0), (3 / 4, 0.0,
1 / 4, 1.0)]
conf_h = dict(proportion=[1, 2, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 4), (0.0, 1 / 4, 1.0, 1 / 2), (0.0, 3 / 4,
1.0, 1 / 4)]
conf_v = dict(proportion=[1, 2, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# splitting on a void rectangle should give multiple void
null_square = [0., 0., 0., 0.]
conf = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*null_square, **conf), [null_square, null_square])
conf = dict(proportion=[1, 2], gap=1.0, horizontal=True)
eq(_split_rect(*null_square, **conf), [null_square, null_square])
def test_rect_deformed_split():
non_pure_square = [1., -1., 1., 0.5]
# division in two equal pieces from the perfect square
h_2split = [(1.0, -1.0, 0.5, 0.5), (1.5, -1.0, 0.5, 0.5)]
conf_h = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*non_pure_square, **conf_h), h_2split)
v_2split = [(1.0, -1.0, 1.0, 0.25), (1.0, -0.75, 1.0, 0.25)]
conf_v = dict(proportion=[1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*non_pure_square, **conf_v), v_2split)
# division in two non-equal pieces from the perfect square
h_2split = [(1.0, -1.0, 1 / 3, 0.5), (1 + 1 / 3, -1.0, 2 / 3, 0.5)]
conf_h = dict(proportion=[1, 2], gap=0.0, horizontal=True)
eq(_split_rect(*non_pure_square, **conf_h), h_2split)
v_2split = [(1.0, -1.0, 1.0, 1 / 6), (1.0, 1 / 6 - 1, 1.0, 2 / 6)]
conf_v = dict(proportion=[1, 2], gap=0.0, horizontal=False)
eq(_split_rect(*non_pure_square, **conf_v), v_2split)
def test_gap_split():
pure_square = [0., 0., 1., 1.]
# null split
conf_h = dict(proportion=[1], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), pure_square)
# equal split
h_2split = [(0.0, 0.0, 0.25, 1.0), (0.75, 0.0, 0.25, 1.0)]
conf_h = dict(proportion=[1, 1], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
# disequal split
h_2split = [(0.0, 0.0, 1 / 6, 1.0), (0.5 + 1 / 6, 0.0, 1 / 3, 1.0)]
conf_h = dict(proportion=[1, 2], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
@dec.skipif(not have_matplotlib or pandas_old)
def test_default_arg_index():
# 2116
import pandas as pd
df = pd.DataFrame({'size' : ['small', 'large', 'large', 'small', 'large',
'small'],
'length' : ['long', 'short', 'short', 'long', 'long',
'short']})
assert_raises(ValueError, mosaic, data=df, title='foobar')
pylab.close('all')
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
wubr2000/zipline | zipline/finance/performance/tracker.py | 14 | 23349 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Performance Tracking
====================
+-----------------+----------------------------------------------------+
| key | value |
+=================+====================================================+
| period_start | The beginning of the period to be tracked. datetime|
| | in pytz.utc timezone. Will always be 0:00 on the |
| | date in UTC. The fact that the time may be on the |
| | prior day in the exchange's local time is ignored |
+-----------------+----------------------------------------------------+
| period_end | The end of the period to be tracked. datetime |
| | in pytz.utc timezone. Will always be 23:59 on the |
| | date in UTC. The fact that the time may be on the |
| | next day in the exchange's local time is ignored |
+-----------------+----------------------------------------------------+
| progress | percentage of test completed |
+-----------------+----------------------------------------------------+
| capital_base | The initial capital assumed for this tracker. |
+-----------------+----------------------------------------------------+
| cumulative_perf | A dictionary representing the cumulative |
| | performance through all the events delivered to |
| | this tracker. For details see the comments on |
| | :py:meth:`PerformancePeriod.to_dict` |
+-----------------+----------------------------------------------------+
| todays_perf | A dictionary representing the cumulative |
| | performance through all the events delivered to |
| | this tracker with datetime stamps between last_open|
| | and last_close. For details see the comments on |
| | :py:meth:`PerformancePeriod.to_dict` |
| | TODO: adding this because we calculate it. May be |
| | overkill. |
+-----------------+----------------------------------------------------+
| cumulative_risk | A dictionary representing the risk metrics |
| _metrics | calculated based on the positions aggregated |
| | through all the events delivered to this tracker. |
| | For details look at the comments for |
| | :py:meth:`zipline.finance.risk.RiskMetrics.to_dict`|
+-----------------+----------------------------------------------------+
"""
from __future__ import division
import logbook
import pickle
from six import iteritems
from datetime import datetime
import numpy as np
import pandas as pd
from pandas.tseries.tools import normalize_date
import zipline.finance.risk as risk
from zipline.finance.trading import TradingEnvironment
from . period import PerformancePeriod
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
from . position_tracker import PositionTracker
log = logbook.Logger('Performance')
class PerformanceTracker(object):
"""
Tracks the performance of the algorithm.
"""
def __init__(self, sim_params):
self.sim_params = sim_params
env = TradingEnvironment.instance()
self.period_start = self.sim_params.period_start
self.period_end = self.sim_params.period_end
self.last_close = self.sim_params.last_close
first_open = self.sim_params.first_open.tz_convert(env.exchange_tz)
self.day = pd.Timestamp(datetime(first_open.year, first_open.month,
first_open.day), tz='UTC')
self.market_open, self.market_close = env.get_open_and_close(self.day)
self.total_days = self.sim_params.days_in_period
self.capital_base = self.sim_params.capital_base
self.emission_rate = sim_params.emission_rate
all_trading_days = env.trading_days
mask = ((all_trading_days >= normalize_date(self.period_start)) &
(all_trading_days <= normalize_date(self.period_end)))
self.trading_days = all_trading_days[mask]
self.dividend_frame = pd.DataFrame()
self._dividend_count = 0
self.position_tracker = PositionTracker()
self.perf_periods = []
if self.emission_rate == 'daily':
self.all_benchmark_returns = pd.Series(
index=self.trading_days)
self.cumulative_risk_metrics = \
risk.RiskMetricsCumulative(self.sim_params)
elif self.emission_rate == 'minute':
self.all_benchmark_returns = pd.Series(index=pd.date_range(
self.sim_params.first_open, self.sim_params.last_close,
freq='Min'))
self.cumulative_risk_metrics = \
risk.RiskMetricsCumulative(self.sim_params,
create_first_day_stats=True)
self.minute_performance = PerformancePeriod(
# initial cash is your capital base.
self.capital_base,
# the cumulative period will be calculated over the
# entire test.
self.period_start,
self.period_end,
# don't save the transactions for the cumulative
# period
keep_transactions=False,
keep_orders=False,
                # don't serialize positions for cumulative period
serialize_positions=False
)
self.minute_performance.position_tracker = self.position_tracker
self.perf_periods.append(self.minute_performance)
# this performance period will span the entire simulation from
# inception.
self.cumulative_performance = PerformancePeriod(
# initial cash is your capital base.
self.capital_base,
# the cumulative period will be calculated over the entire test.
self.period_start,
self.period_end,
# don't save the transactions for the cumulative
# period
keep_transactions=False,
keep_orders=False,
            # don't serialize positions for cumulative period
serialize_positions=False,
)
self.cumulative_performance.position_tracker = self.position_tracker
self.perf_periods.append(self.cumulative_performance)
# this performance period will span just the current market day
self.todays_performance = PerformancePeriod(
# initial cash is your capital base.
self.capital_base,
# the daily period will be calculated for the market day
self.market_open,
self.market_close,
keep_transactions=True,
keep_orders=True,
serialize_positions=True,
)
self.todays_performance.position_tracker = self.position_tracker
self.perf_periods.append(self.todays_performance)
self.saved_dt = self.period_start
# one indexed so that we reach 100%
self.day_count = 0.0
self.txn_count = 0
self.account_needs_update = True
self._account = None
def __repr__(self):
return "%s(%r)" % (
self.__class__.__name__,
{'simulation parameters': self.sim_params})
@property
def progress(self):
if self.emission_rate == 'minute':
# Fake a value
return 1.0
elif self.emission_rate == 'daily':
return self.day_count / self.total_days
def set_date(self, date):
if self.emission_rate == 'minute':
self.saved_dt = date
self.todays_performance.period_close = self.saved_dt
def update_dividends(self, new_dividends):
"""
Update our dividend frame with new dividends. @new_dividends should be
a DataFrame with columns containing at least the entries in
zipline.protocol.DIVIDEND_FIELDS.
"""
# Mark each new dividend with a unique integer id. This ensures that
# we can differentiate dividends whose date/sid fields are otherwise
# identical.
new_dividends['id'] = np.arange(
self._dividend_count,
self._dividend_count + len(new_dividends),
)
self._dividend_count += len(new_dividends)
self.dividend_frame = pd.concat(
[self.dividend_frame, new_dividends]
).sort(['pay_date', 'ex_date']).set_index('id', drop=False)
def initialize_dividends_from_other(self, other):
"""
Helper for copying dividends to a new PerformanceTracker while
preserving dividend count. Useful if a simulation needs to create a
new PerformanceTracker mid-stream and wants to preserve stored dividend
info.
Note that this does not copy unpaid dividends.
"""
self.dividend_frame = other.dividend_frame
self._dividend_count = other._dividend_count
def handle_sid_removed_from_universe(self, sid):
"""
This method handles any behaviors that must occur when a SID leaves the
universe of the TradingAlgorithm.
Parameters
        ----------
sid : int
The sid of the Asset being removed from the universe.
"""
# Drop any dividends for the sid from the dividends frame
self.dividend_frame = self.dividend_frame[
self.dividend_frame.sid != sid
]
def update_performance(self):
# calculate performance as of last trade
for perf_period in self.perf_periods:
perf_period.calculate_performance()
def get_portfolio(self, performance_needs_update):
if performance_needs_update:
self.update_performance()
self.account_needs_update = True
return self.cumulative_performance.as_portfolio()
def get_account(self, performance_needs_update):
if performance_needs_update:
self.update_performance()
self.account_needs_update = True
if self.account_needs_update:
self._update_account()
return self._account
def _update_account(self):
self._account = self.cumulative_performance.as_account()
self.account_needs_update = False
def to_dict(self, emission_type=None):
"""
Creates a dictionary representing the state of this tracker.
Returns a dict object of the form described in header comments.
"""
# Default to the emission rate of this tracker if no type is provided
if emission_type is None:
emission_type = self.emission_rate
_dict = {
'period_start': self.period_start,
'period_end': self.period_end,
'capital_base': self.capital_base,
'cumulative_perf': self.cumulative_performance.to_dict(),
'progress': self.progress,
'cumulative_risk_metrics': self.cumulative_risk_metrics.to_dict()
}
if emission_type == 'daily':
_dict['daily_perf'] = self.todays_performance.to_dict()
elif emission_type == 'minute':
_dict['minute_perf'] = self.todays_performance.to_dict(
self.saved_dt)
else:
raise ValueError("Invalid emission type: %s" % emission_type)
return _dict
def process_trade(self, event):
# update last sale, and pay out a cash adjustment
cash_adjustment = self.position_tracker.update_last_sale(event)
if cash_adjustment != 0:
for perf_period in self.perf_periods:
perf_period.handle_cash_payment(cash_adjustment)
def process_transaction(self, event):
self.txn_count += 1
self.position_tracker.execute_transaction(event)
for perf_period in self.perf_periods:
perf_period.handle_execution(event)
def process_dividend(self, dividend):
log.info("Ignoring DIVIDEND event.")
def process_split(self, event):
leftover_cash = self.position_tracker.handle_split(event)
if leftover_cash > 0:
for perf_period in self.perf_periods:
perf_period.handle_cash_payment(leftover_cash)
def process_order(self, event):
for perf_period in self.perf_periods:
perf_period.record_order(event)
def process_commission(self, event):
self.position_tracker.handle_commission(event)
for perf_period in self.perf_periods:
perf_period.handle_commission(event)
def process_benchmark(self, event):
if self.sim_params.data_frequency == 'minute' and \
self.sim_params.emission_rate == 'daily':
# Minute data benchmarks should have a timestamp of market
# close, so that calculations are triggered at the right time.
# However, risk module uses midnight as the 'day'
# marker for returns, so adjust back to midnight.
midnight = pd.tseries.tools.normalize_date(event.dt)
else:
midnight = event.dt
if midnight not in self.all_benchmark_returns.index:
raise AssertionError(
("Date %s not allocated in all_benchmark_returns. "
"Calendar seems to mismatch with benchmark. "
"Benchmark container is=%s" %
(midnight,
self.all_benchmark_returns.index)))
self.all_benchmark_returns[midnight] = event.returns
def process_close_position(self, event):
# CLOSE_POSITION events that contain prices that must be handled as
# a final trade event
if 'price' in event:
self.process_trade(event)
txn = self.position_tracker.\
maybe_create_close_position_transaction(event)
if txn:
self.process_transaction(txn)
def check_upcoming_dividends(self, next_trading_day):
"""
Check if we currently own any stocks with dividends whose ex_date is
        the next trading day. Track how much we should be paid on those
dividends' pay dates.
Then check if we are owed cash/stock for any dividends whose pay date
is the next trading day. Apply all such benefits, then recalculate
performance.
"""
if len(self.dividend_frame) == 0:
# We don't currently know about any dividends for this simulation
# period, so bail.
return
# Dividends whose ex_date is the next trading day. We need to check if
# we own any of these stocks so we know to pay them out when the pay
# date comes.
ex_date_mask = (self.dividend_frame['ex_date'] == next_trading_day)
dividends_earnable = self.dividend_frame[ex_date_mask]
# Dividends whose pay date is the next trading day. If we held any of
# these stocks on midnight before the ex_date, we need to pay these out
# now.
pay_date_mask = (self.dividend_frame['pay_date'] == next_trading_day)
dividends_payable = self.dividend_frame[pay_date_mask]
position_tracker = self.position_tracker
if len(dividends_earnable):
position_tracker.earn_dividends(dividends_earnable)
if not len(dividends_payable):
return
net_cash_payment = position_tracker.pay_dividends(dividends_payable)
for period in self.perf_periods:
# notify periods to update their stats
period.handle_dividends_paid(net_cash_payment)
def check_asset_auto_closes(self, next_trading_day):
"""
Check if the position tracker currently owns any Assets with an
auto-close date that is the next trading day. Close those positions.
Parameters
----------
next_trading_day : pandas.Timestamp
The next trading day of the simulation
"""
auto_close_events = self.position_tracker.auto_close_position_events(
next_trading_day=next_trading_day
)
for event in auto_close_events:
self.process_close_position(event)
def handle_minute_close(self, dt):
"""
Handles the close of the given minute. This includes handling
market-close functions if the given minute is the end of the market
day.
Parameters
        ----------
dt : Timestamp
The minute that is ending
Returns
        -------
(dict, dict/None)
A tuple of the minute perf packet and daily perf packet.
If the market day has not ended, the daily perf packet is None.
"""
self.update_performance()
todays_date = normalize_date(dt)
account = self.get_account(False)
self.minute_performance.rollover()
bench_returns = self.all_benchmark_returns.loc[todays_date:dt]
# cumulative returns
bench_since_open = (1. + bench_returns).prod() - 1
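        # Illustrative example (hypothetical returns): minute benchmark returns of
        # +1% and -0.5% since the open compound to (1.01 * 0.995) - 1 ~= 0.00495.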
self.cumulative_risk_metrics.update(todays_date,
self.todays_performance.returns,
bench_since_open,
account)
minute_packet = self.to_dict(emission_type='minute')
# if this is the close, update dividends for the next day.
# Return the performance tuple
if dt == self.market_close:
return (minute_packet, self._handle_market_close(todays_date))
else:
return (minute_packet, None)
def handle_market_close_daily(self):
"""
Function called after handle_data when running with daily emission
rate.
"""
self.update_performance()
completed_date = self.day
account = self.get_account(False)
# update risk metrics for cumulative performance
self.cumulative_risk_metrics.update(
completed_date,
self.todays_performance.returns,
self.all_benchmark_returns[completed_date],
account)
return self._handle_market_close(completed_date)
def _handle_market_close(self, completed_date):
# increment the day counter before we move markers forward.
self.day_count += 1.0
# Get the next trading day and, if it is past the bounds of this
# simulation, return the daily perf packet
next_trading_day = TradingEnvironment.instance().\
next_trading_day(completed_date)
# Check if any assets need to be auto-closed before generating today's
# perf period
if next_trading_day:
self.check_asset_auto_closes(next_trading_day=next_trading_day)
# Take a snapshot of our current performance to return to the
# browser.
daily_update = self.to_dict(emission_type='daily')
# On the last day of the test, don't create tomorrow's performance
# period. We may not be able to find the next trading day if we're at
# the end of our historical data
if self.market_close >= self.last_close:
return daily_update
# move the market day markers forward
env = TradingEnvironment.instance()
self.market_open, self.market_close = \
env.next_open_and_close(self.day)
self.day = env.next_trading_day(self.day)
# Roll over positions to current day.
self.todays_performance.rollover()
self.todays_performance.period_open = self.market_open
self.todays_performance.period_close = self.market_close
# If the next trading day is irrelevant, then return the daily packet
if (next_trading_day is None) or (next_trading_day >= self.last_close):
return daily_update
# Check for any dividends and auto-closes, then return the daily perf
# packet
self.check_upcoming_dividends(next_trading_day=next_trading_day)
return daily_update
def handle_simulation_end(self):
"""
When the simulation is complete, run the full period risk report
and send it out on the results socket.
"""
log_msg = "Simulated {n} trading days out of {m}."
log.info(log_msg.format(n=int(self.day_count), m=self.total_days))
log.info("first open: {d}".format(
d=self.sim_params.first_open))
log.info("last close: {d}".format(
d=self.sim_params.last_close))
bms = pd.Series(
index=self.cumulative_risk_metrics.cont_index,
data=self.cumulative_risk_metrics.benchmark_returns_cont)
ars = pd.Series(
index=self.cumulative_risk_metrics.cont_index,
data=self.cumulative_risk_metrics.algorithm_returns_cont)
acl = self.cumulative_risk_metrics.algorithm_cumulative_leverages
self.risk_report = risk.RiskReport(
ars,
self.sim_params,
benchmark_returns=bms,
algorithm_leverages=acl)
risk_dict = self.risk_report.to_dict()
return risk_dict
def __getstate__(self):
state_dict = \
{k: v for k, v in iteritems(self.__dict__)
if not k.startswith('_')}
state_dict['dividend_frame'] = pickle.dumps(self.dividend_frame)
state_dict['_dividend_count'] = self._dividend_count
# we already store perf periods as attributes
del state_dict['perf_periods']
STATE_VERSION = 3
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 3
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("PerformanceTracker saved state is too old.")
self.__dict__.update(state)
# Handle the dividend frame specially
self.dividend_frame = pickle.loads(state['dividend_frame'])
# properly setup the perf periods
self.perf_periods = []
p_types = ['cumulative', 'todays', 'minute']
for p_type in p_types:
name = p_type + '_performance'
period = getattr(self, name, None)
if period is None:
continue
period._position_tracker = self.position_tracker
self.perf_periods.append(period)
| apache-2.0 |
AnasGhrab/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
aolindahl/streaking | time_to_energy_conversion.py | 1 | 24533 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 11 15:49:41 2015
@author: antlin
"""
import matplotlib.pyplot as plt
import numpy as np
import lmfit
import h5py
import process_hdf5 as process
from aolPyModules import tof as tof_module
from aolPyModules.aolUtil import struct
Ne1s = 870.2
def e_energy_prediction_model(params, l3_energy, bc2_energy=None, fee=None,
e_energy=None):
"""Modeling of the electorn energy based on a few parameters."""
K = params['K'].value
IP = params['IP'].value
BC2_nominal = params['BC2_nominal'].value
BC2_factor = params['BC2_factor'].value
fee_factor = params['fee_factor'].value
fee_nominal = params['fee_nominal'].value
if bc2_energy is None:
e_beam_energy = l3_energy
else:
e_beam_energy = l3_energy - BC2_factor * (bc2_energy - BC2_nominal)
if fee is not None:
e_beam_energy -= fee_factor * (fee - fee_nominal)
mod = K * e_beam_energy**2 - IP
if e_energy is None:
return mod
return mod - e_energy
# mod = params['e_0'].value
# mod += params['l3_1'].value * (l3_energy - params['l3_m'].value)
#
# if bc2_energy is not None:
# mod += params['bc2_1'].value * (bc2_energy - params['bc2_m'].value)
##
# if fee is not None:
# mod += params['fee_1'].value * (fee - params['fee_m'].value)
#
# if e_energy is None:
# return mod
# return mod - e_energy
def e_energy_prediction_model_start_params(l3_energy, e_energy,
bc2_energy=[], fee=[]):
params = lmfit.Parameters()
params.add('K', 4.57e-5, min=4.3e-5, max=1e-4)
use_bc2 = len(bc2_energy) > 0
params.add('BC2_nominal', 5e3, min=4.9e3, max=5.1e3, vary=use_bc2)
params.add('BC2_factor', 1, min=0, max=2, vary=use_bc2)
use_fee = len(fee) > 0
params.add('fee_factor', 108 if use_fee else 0, vary=use_fee)
params.add('fee_nominal', np.nanmean(fee) if use_fee else 0, vary=use_fee)
params.add('IP', value=Ne1s, vary=False)
# params.add('e_0', np.mean(e_energy), vary=False)
# params.add('l3_1', 0)
# params.add('l3_m', np.mean(l3_energy), vary=False)
#
# use_bc2 = len(bc2_energy) > 0
# params.add('bc2_1', 0, vary=use_bc2)
# params.add('bc2_m', np.mean(bc2_energy) if use_bc2 else 0, vary=False)
#
# use_fee = len(fee) > 0
# params.add('fee_1', 0, vary=use_fee)
# params.add('fee_m', np.mean(fee) if use_fee else 0, vary=False)
return params
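# Illustrative sketch (not part of the original analysis): the intended fit
# pattern for the electron-energy model above, mirroring the lmfit.minimize()
# calls used further down in this module.  All input arrays are assumed to be
# 1D and of equal length.
def _example_fit_e_energy_prediction(l3_energy, e_energy, bc2_energy, fee):
    params = e_energy_prediction_model_start_params(l3_energy, e_energy,
                                                    bc2_energy=bc2_energy,
                                                    fee=fee)
    result = lmfit.minimize(e_energy_prediction_model, params,
                            kws={'l3_energy': l3_energy,
                                 'bc2_energy': bc2_energy,
                                 'fee': fee,
                                 'e_energy': e_energy})
    return params, result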
def tof_prediction_model(params, l3_energy, bc2_energy=None, fee=None,
tof=None):
d_eff = params['d_eff_prediction'].value
t_0 = params['t_0_prediction'].value
E_0 = params['E_0_prediction'].value
K = params['K'].value
IP = params['IP'].value
BC2_nominal = params['BC2_nominal'].value
BC2_factor = params['BC2_factor'].value
fee_factor = params['fee_factor'].value
if bc2_energy is None:
e_beam_energy = l3_energy
else:
e_beam_energy = l3_energy - BC2_factor * (bc2_energy - BC2_nominal)
if fee is not None:
e_beam_energy -= fee * fee_factor
mod = t_0 + d_eff / np.sqrt(K * e_beam_energy**2 - IP - E_0)
if tof is None:
return mod
return mod-tof
def tof_prediction_params(l3_energy=[], bc2_energy=[], fee=[],
tof=[]):
params = photon_energy_params(postfix='_prediction')
params.add('K', 4.57e-5, min=4.5e-5, max=1e-4)
use_bc2 = len(bc2_energy) > 0
params.add('BC2_nominal', 5e3, min=4.9e3, max=5.1e3, vary=use_bc2)
params.add('BC2_factor', 1, min=0, max=2, vary=use_bc2)
use_fee = len(fee) > 0
params.add('fee_factor', 108 if use_fee else 0, vary=use_fee)
return params
def photoelectron_energy_model(params, tof, e_pe=None, postfix=''):
t_0 = params['t_0' + postfix].value
E_0 = params['E_0' + postfix].value
d_eff = params['d_eff' + postfix].value
mod = E_0 + (d_eff / (tof - t_0))**2
if e_pe is None:
return mod
return mod - e_pe
def photon_energy_model(params, tof, e_p=None, postfix=''):
e_ip = params['IP'].value
mod = photoelectron_energy_model(params, tof, postfix=postfix) + e_ip
if e_p is None:
return mod
return mod - e_p
def photon_energy_params(postfix=''):
params = lmfit.Parameters()
params.add('d_eff' + postfix, 1.23, min=1, max=2)
params.add('t_0' + postfix, 1.49, min=1.47, max=1.51)
params.add('E_0' + postfix, 0, vary=False)
params.add('IP', Ne1s, vary=False)
return params
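# Illustrative sketch (not part of the original analysis): evaluating the
# time-of-flight to photoelectron-energy mapping with the default start
# parameters above.  With d_eff = 1.23, t_0 = 1.49 and E_0 = 0 this is simply
# (1.23 / (tof - 1.49))**2 eV; the TOF values below are arbitrary examples.
def _example_time_to_energy_mapping():
    params = photon_energy_params()
    tof = np.array([1.55, 1.60, 1.65])
    return photoelectron_energy_model(params, tof)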
def photoelectron_energy_prediction_model(params, l3_energy, bc2_energy=None,
fee=None, tof=None):
return photoelectron_energy_model(params,
tof_prediction_model(
params,
l3_energy=l3_energy,
bc2_energy=bc2_energy,
fee=fee))
def fel_params():
params = lmfit.Parameters()
params.add('K', 1)
return params
def fel_model(params, e_energy, p_energy=None):
K = params['K'].value
mod = K * e_energy**2
if p_energy is None:
return mod
return mod - p_energy
def fel_model_inverted(params, p_energy, e_energy=None):
K = params['K'].value
mod = np.sqrt(p_energy / K)
if e_energy is None:
return mod
return mod - e_energy
def tof_e_energy_params(d_exp_list=[2]):
params = lmfit.Parameters()
params.add('E_0', 0)
params.add('t_0', 1.5, min=1.0, max=2.0)
for d_exp in d_exp_list:
params.add('d_{}'.format(d_exp), 1 if d_exp == 2 else 0)
return params
def tof_e_energy_model(params, tof, energy=None):
E_0 = params['E_0'].value
t_0 = params['t_0'].value
d = {}
for d_exp in range(1, 11):
name = 'd_{}'.format(d_exp)
if name in params.keys():
d[d_exp] = params[name].value
mod = E_0 * np.ones_like(tof)
for d_exp, d_val in d.iteritems():
mod += d_val / (tof - t_0)**d_exp
if energy is None:
return mod
return mod - energy
def tof_photon_energy_params(d_exp_list=[2]):
params = tof_e_energy_params(d_exp_list=d_exp_list)
params.add('IP', Ne1s, vary=False)
return params
def tof_photon_energy_model(params, tof, photon_energy=None):
IP = params['IP'].value
mod = IP + tof_e_energy_model(params, tof)
if photon_energy is None:
return mod
return mod - photon_energy
def tof_e_beam_energy_params(d_exp_list=[2]):
params = tof_photon_energy_params(d_exp_list=d_exp_list)
params.add('K', 2.5)
return params
def tof_e_beam_energy_model(params, tof, e_beam_energy=None):
K = params['K'].value
mod = K * tof_photon_energy_model(params, tof)**2
if e_beam_energy is None:
return mod
return mod - e_beam_energy
def slicing_plot(x, y, z, n_z=6, fig_num=None):
n_rows = int(np.floor(np.sqrt(n_z)))
n_cols = int(np.ceil(float(n_z) / n_rows))
valid = np.isfinite(x) * np.isfinite(y) * np.isfinite(z)
x_limits = np.linspace(x[valid].min(), x[valid].max(), 2**6)
y_limits = np.linspace(y[valid].min(), y[valid].max(), 2**6)
z_limits = np.linspace(z[valid].min(), z[valid].max(), n_z+1)
fig = plt.figure(fig_num)
fig.clf()
ax = None
for i in range(n_z):
I = (z_limits[i] < z) & (z < z_limits[i+1])
img, _, _ = np.histogram2d(x[I], y[I], (x_limits, y_limits))
ax = fig.add_subplot(n_rows, n_cols, i+1, sharex=ax, sharey=ax)
ax.imshow(img.T, aspect='auto', interpolation='none',
extent=(x_limits[0], x_limits[-1],
y_limits[0], y_limits[-1]),
origin='lower')
# ax.plot(x[I], y[I], '.')
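# Illustrative sketch (not part of the original analysis): slicing_plot() on
# made-up correlated data, sliced into six bins of the third variable.
def _example_slicing_plot():
    x = np.random.randn(10000)
    y = x + 0.5 * np.random.randn(10000)
    z = np.random.rand(10000)
    slicing_plot(x, y, z, n_z=6)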
def load_tof_to_energy_data(verbose=0):
# if verbose > 0:
# print 'In "load_tof_to_energy_data()".'
calib_runs = [24, 26, 28, 31, 38]
calib_energies = [930, 950, 970, 1000, 1030]
calib_energy_map = dict(zip(calib_runs, calib_energies))
h5_dict = {}
for run in calib_runs:
name = process.h5_file_name_template.format(run)
process.update_with_noise_and_response(name, verbose=verbose)
h5_dict[run] = h5py.File(name, 'r+')
return calib_runs, h5_dict, calib_energy_map
def get_calib_data(plot=False, verbose=0):
# Load the data
calib_runs, h5_dict, calib_energy_map = load_tof_to_energy_data(
verbose=verbose)
# Make the calib data struct
calib_data = struct()
# Make some empty lists
calib_data.e_energy = []
calib_data.p_energy_calib = []
calib_data.p_energy_calib_mean = []
calib_data.tof = []
calib_data.integral = []
calib_data.pulse_energy = []
calib_data.l3_energy = []
calib_data.bc2_energy = []
calib_data.charge = []
calib_data.current_bc2 = []
# pct = []
if plot:
# Make the fee figure
fee_fig = plt.figure('fee hist')
fee_fig.clf()
# Make the energy figure
energy_fig = plt.figure('energy')
energy_fig.clf()
energy_ax_list = []
# All data should be plotted in the last subplot
common_ax = energy_fig.add_subplot(2, 3, 6)
plt.xlabel('L3 energy (MeV)')
i_plot = 0
# For each of the runs
for run, h5 in h5_dict.iteritems():
# Get the fee values
fee = h5['fee_mean'].value
# Check which fee values are actually real
fee_valid = np.isfinite(fee)
# Implement a range selection of the valid fee values
# fee_selection = fee_valid & (0.03 < fee) & (fee < 0.08)
# fee_selection = fee_valid & (0 < fee) & (fee < 10)
fee_selection = fee_valid & (0.04 < fee) & (fee < 0.06)
# Fill the fee plot
if plot:
# Increment the plot number...
i_plot += 1
# ...and make the axis
ax = fee_fig.add_subplot(2, 3, i_plot)
# Make fee histograms
_, bins, _ = ax.hist(fee[fee_valid], bins=100)
# and plot the result
ax.hist(fee[fee_selection], bins=bins)
ax.set_title('run {}'.format(run))
# Get the peak center of the fee filtered peaks
streak_center = h5['streak_peak_center'][fee_selection]
# Create a selection on the peak center based on the distribution
sc_mean = streak_center.mean()
sc_std = streak_center.std()
streak_selection = (
(sc_mean - 3*sc_std < h5['streak_peak_center'][:]) &
(h5['streak_peak_center'][:] < sc_mean + 3*sc_std))
# Get the corrected l3 energy
corrected_energy_l3 = h5['energy_L3_corrected_MeV'][fee_selection]
# and make a similar selection as above
cor_l3_mean = corrected_energy_l3.mean()
cor_l3_std = corrected_energy_l3.std()
cor_l3_selection = (
(cor_l3_mean - 3*cor_l3_std <
h5['energy_L3_corrected_MeV'][:]) &
(h5['energy_L3_corrected_MeV'][:] <
cor_l3_mean + 3*cor_l3_std))
        # The total shot selection takes all three of the selections created above
# into account
selection = fee_selection * streak_selection * cor_l3_selection
# Append the data in the lists
calib_data.e_energy.append(h5['energy_L3_corrected_MeV'][selection])
calib_data.l3_energy.append(h5['raw/energy_L3_MeV'][selection])
calib_data.bc2_energy.append(h5['energy_BC2_MeV'][selection])
calib_data.p_energy_calib.append(
[calib_energy_map[run]]*selection.sum())
calib_data.p_energy_calib_mean.append([calib_energy_map[run]])
calib_data.tof.append(h5['streak_peak_center'][selection])
calib_data.integral.append(h5['streak_peak_integral'][selection])
calib_data.pulse_energy.append(fee[selection])
calib_data.charge.append(h5['raw/charge_nC'][selection])
calib_data.current_bc2.append(h5['raw/current_BC2_A'][selection])
# pct.append(h5['raw/phase_cavity_times'][selection, 1])
# pct[-1] -= pct[-1][np.isfinite(pct[-1])].mean()
# Populate the energy plot
if plot:
# Make the axis
ax = energy_fig.add_subplot(2, 3, i_plot)
energy_ax_list.append(ax)
# plt.plot(energy_l3, streak_center, '.')
plt.scatter(calib_data.l3_energy[-1],
calib_data.tof[-1],
s=1, c=calib_data.pulse_energy[-1],
linewidths=(0,), alpha=1)
if i_plot % 3 == 1:
plt.ylabel('Photoline center (us)')
if i_plot > 3:
plt.xlabel('L3 energy (MeV)')
ax.set_title('run {}'.format(run))
common_ax.plot(calib_data.l3_energy[-1], calib_data.tof[-1], '.')
calib_data.tof_mean = [[np.mean(tof_vals)] for tof_vals in calib_data.tof]
# Convert the data lists to arrays
if verbose:
print 'Making data arrays.'
for k, v in calib_data.toDict().iteritems():
setattr(calib_data, k, np.concatenate(v))
return calib_data
def make_tof_to_energy_matrix(energy_scale_eV, plot=False, verbose=0):
# Get time to energy conversion parameters
time_to_energy_params, tof_prediction_params = \
fit_tof_prediction(plot=plot, verbose=verbose)
# Get the calib data
calib_data = get_calib_data(plot=plot, verbose=verbose)
# and unpack the needed parameters
integral = calib_data.integral
pulse_energy = calib_data.pulse_energy
tof = calib_data.tof
# e_energy = calib_data.e_energy
# Load the data files
_, h5_dict, _ = load_tof_to_energy_data()
# and get the time scale
time_scale = h5_dict.values()[0]['raw/time_scale'].value
# e_energy = np.concatenate(e_energy)
# p_energy_axis = np.linspace(p_energy_calib.min() * 0.99,
# p_energy_calib.max() * 1.01,
# 2**10)
# tof = np.concatenate(tof)
# integral = np.concatenate(integral)
# pulse_energy = np.concatenate(pulse_energy)
# e_e_axis = np.linspace(4500, 4800, 2**10)
tof_lims = np.linspace(1.58, 1.66, 2**10 + 1)
tof_axis = (tof_lims[:-1] + tof_lims[1:]) / 2
# Convert the effective length to real units (mm)
D_mm = (time_to_energy_params['d_eff'].value * tof_module.c_0_mps * 1e-3 /
np.sqrt(tof_module.m_e_eV / 2))
trans_mat, _ = tof_module.get_time_to_energy_conversion(
time_scale, energy_scale_eV, verbose=(verbose > 1),
D_mm=D_mm,
prompt_us=time_to_energy_params['t_0'].value,
t_offset_us=0,
E_offset_eV=time_to_energy_params['E_0'].value)
trans_mat = trans_mat.toarray()
if plot:
plt.figure('trans mat raw')
plt.clf()
plt.imshow(trans_mat, interpolation='none', origin='low',
aspect='auto', extent=(time_scale.min(), time_scale.max(),
energy_scale_eV.min(),
energy_scale_eV.max()))
norm_integral = integral/pulse_energy
binned_norm_int = np.empty_like(tof_axis)
for i_bin in range(len(binned_norm_int)):
I = ((tof_lims[i_bin] < tof) &
(tof < tof_lims[i_bin + 1]) &
np.isfinite(norm_integral))
binned_norm_int[i_bin] = norm_integral[I].mean()
I = np.isfinite(binned_norm_int)
binned_norm_int = binned_norm_int[I]
tof_binned_norm_int = tof_axis[I]
trans_p = np.polyfit(tof, norm_integral, 4)
trans_model = lmfit.models.SkewedGaussianModel()
trans_model.set_param_hint('center', value=1.62, min=1.58, max=1.65)
trans_model.set_param_hint('sigma', value=0.1, min=0, max=1)
trans_model.set_param_hint('amplitude', value=1, min=0)
trans_model.set_param_hint('gamma', value=1, min=0)
trans_params = trans_model.make_params()
trans_result = trans_model.fit(norm_integral, x=tof,
params=trans_params)
binned_result = trans_model.fit(binned_norm_int, x=tof_binned_norm_int,
params=trans_params)
if verbose:
print '\nBinned fit result:'
print binned_result.fit_report()
print '\nRaw data fit result:'
print trans_result.fit_report()
if plot:
plt.figure('intensity')
plt.clf()
ax1 = plt.subplot(131)
plt.plot(tof, integral, '.', label='direct integral')
plt.xlabel('tof')
plt.ylabel('peak integral')
plt.legend(fontsize='medium')
ax2 = plt.subplot(132, sharex=ax1)
plt.plot(tof, integral/pulse_energy, '.', label='raw (integral / fee)')
plt.plot(tof_axis, np.polyval(trans_p, tof_axis), '-c',
label='polynomial fit')
plt.plot(tof_axis,
trans_model.eval(x=tof_axis, **trans_result.best_values),
label='fit to raw')
plt.xlabel('tof')
plt.ylabel('fee normalized peak integral')
plt.legend(fontsize='medium')
plt.subplot(133, sharex=ax2, sharey=ax2)
plt.plot(tof_binned_norm_int, binned_norm_int, '.',
label='binned data')
plt.plot(tof_axis,
trans_model.eval(x=tof_axis, **trans_result.best_values),
                 label='fit to raw')
plt.plot(tof_axis,
trans_model.eval(x=tof_axis, **binned_result.best_values),
label='fit to binned')
plt.xlabel('tof')
plt.ylabel('fee normalized peak integral')
plt.legend(fontsize='medium')
plt.ylim(0, 0.11)
plt.savefig('figures/timeToEnergyIntensity.png')
transmission_factors = 1. / trans_model.eval(x=time_scale,
**binned_result.best_values)
transmission_factors[(time_scale < tof_lims.min()) |
(tof_lims.max() < time_scale)] = 0.0
transmission_factors[~np.isfinite(transmission_factors)] = 0.0
transmission_factors /= transmission_factors[
transmission_factors > 0].min()
return (trans_mat * transmission_factors,
time_scale,
energy_scale_eV,
time_to_energy_params,
tof_prediction_params)
def fit_tof_prediction(plot=False, verbose=0):
if verbose > 0:
print 'In "fit_tof_prediction()".'
# Get the calibration data
calib_data = get_calib_data(plot=plot, verbose=verbose)
# Unpack calib data
tof_mean = calib_data.tof_mean
p_energy_calib_mean = calib_data.p_energy_calib_mean
l3_energy = calib_data.l3_energy
bc2_energy = calib_data.bc2_energy
pulse_energy = calib_data.pulse_energy
tof = calib_data.tof
p_energy_calib = calib_data.p_energy_calib
    # Fit the time to energy conversion using the values given by the
# operators
time_to_energy_params = photon_energy_params()
res = lmfit.minimize(photon_energy_model, time_to_energy_params,
args=(tof_mean, p_energy_calib_mean))
if verbose:
print 'Time to energy conversion fit results:'
lmfit.report_fit(res)
# Select the parameters to be used in the tof time prediction calculation
var_dict = {'l3_energy': l3_energy,
'bc2_energy': bc2_energy,
'fee': pulse_energy,
'tof': tof
}
# Create the parameters for the tof prediction
prediction_params = tof_prediction_params(**var_dict)
# Update the parameters from the time to energy conversion
for k, v in time_to_energy_params.iteritems():
k_pred = k
if k != 'IP':
k_pred += '_prediction'
prediction_params[k_pred].value = v.value
# prediction_params[k_pred].vary = False
# Perform the fit
res = lmfit.minimize(tof_prediction_model, prediction_params,
kws=var_dict)
# Present the results
if verbose:
print 'Tof prediction fit report:'
lmfit.report_fit(res)
# Plot the differences between the measured tof and the predicted tof
if plot:
time_eps_fig = plt.figure('time eps')
time_eps_fig.clf()
plt.scatter(tof,
tof_prediction_model(prediction_params,
**var_dict),
s=1, c=pulse_energy, linewidths=(0,))
plt.xlabel('TOF (us)')
plt.ylabel('TOF prediction error (us)')
cbar = plt.colorbar()
cbar.set_label('fee (mJ)')
plt.savefig('figures/tof_prediction.png')
# Look at the time to energy conversion
if plot:
time_axis = np.linspace(min(tof), max(tof), 2**8)
plt.figure('time to energy')
plt.clf()
combined_params = lmfit.Parameters()
for pars in [time_to_energy_params, prediction_params]:
for k, v in pars.iteritems():
combined_params.add(k, v.value)
# plt.plot(
# tof,
# photoelectron_energy_prediction_model(combined_params, **var_dict),
# '.', label='prediction + calibration')
plt.plot(tof, p_energy_calib - Ne1s, '.',
label='Set $E_p - E_{Ne\,1s} = E_e$ vs. TOF')
plt.plot(tof_mean, p_energy_calib_mean - Ne1s, 'o',
                 label='$E_e$ vs. mean TOF')
# plt.plot(time_axis,
# photoelectron_energy_model(prediction_params,
# time_axis,
# postfix='_prediction'),
# label='t -> E tof prediction')
plt.plot(time_axis,
photoelectron_energy_model(time_to_energy_params,
time_axis),
label='t -> E calibration')
plt.xlabel('time (us)')
plt.ylabel('photo electron energy (eV)')
plt.legend(fontsize='medium')
plt.grid(True)
plt.savefig('figures/timeToEnergy.png')
return time_to_energy_params, prediction_params
if __name__ == '__main__':
verbose = 2
# calib_data = get_calib_data(plot=False)
energy_scale = np.linspace(40, 160, 2**8)
(M, time_scale, energy_scale,
params_time_to_energy,
params_tof_prediction) = make_tof_to_energy_matrix(
energy_scale_eV=energy_scale, plot=True, verbose=2)
h5 = process.load_file('h5_files/run118_all.h5', verbose=1)
process.list_hdf5_content(h5)
raw = h5['raw']
time_scale = raw['time_scale'][:]
time_signal_dset = h5['filtered_time_signal']
center_dset = h5['streak_peak_center']
n_events = len(raw['fiducial'])
energy_scale = h5['energy_scale_eV'][:]
energy_signal_dset = h5['energy_signal']
predicted_energy_dset = h5['photoelectron_energy_prediction_eV']
selected_shots = list(np.linspace(0, n_events, 10, endpoint=False))
plt.figure('time_traces')
plt.clf()
ax1 = plt.subplot(221)
ax2 = plt.subplot(222)
ax3 = plt.subplot(223)
ax4 = plt.subplot(224)
ax1.plot(time_scale, time_signal_dset[selected_shots, :].T)
ax2.plot(np.tile(time_scale, (len(selected_shots), 1)).T -
center_dset[selected_shots],
time_signal_dset[selected_shots, :].T)
ax3.plot(energy_scale, energy_signal_dset[selected_shots, :].T)
ax4.plot(np.tile(energy_scale, (len(selected_shots), 1)).T -
predicted_energy_dset[selected_shots],
energy_signal_dset[selected_shots, :].T)
# params_time_to_energy, params_tof_prediction = \
# fit_tof_prediction(plot=True, verbose=verbose)
# h5 = process.load_file('h5_files/run108_all.h5', verbose=1)
#
# n_evnts = len(h5['raw/fiducial'])
# time_scale = h5['raw/time_scale'].value
# time_signal_dset = h5['filtered_time_signal']
# time_signal_raw_dset = h5['raw/time_signal']
# i_evt = np.random.randint(n_evnts)
# t_trace = time_signal_dset[i_evt, :]
# t_trace_raw = time_signal_raw_dset[i_evt, :]
# e_trace = M.dot(t_trace)
#
# plt.figure('tof to energy conversion test')
# plt.clf()
# plt.subplot(121)
# plt.plot(time_scale, t_trace, label='wiener deconv.')
# plt.plot(time_scale, t_trace_raw, label='raw')
# plt.xlabel('tof (us)')
# plt.legend(loc='best', fontsize='small')
# plt.subplot(122)
# plt.plot(energy_scale, M.dot(t_trace))
# plt.xlabel('Energy (eV)')
| gpl-2.0 |
enigmampc/catalyst | catalyst/exchange/live_graph_clock.py | 1 | 3284 | from time import sleep
import pandas as pd
from catalyst.constants import LOG_LEVEL
from catalyst.exchange.utils.stats_utils import prepare_stats
from catalyst.gens.sim_engine import (
BAR,
SESSION_START,
SESSION_END,
)
from logbook import Logger
log = Logger('LiveGraphClock', level=LOG_LEVEL)
class LiveGraphClock(object):
"""Realtime clock for live trading.
This class is a drop-in replacement for
:class:`zipline.gens.sim_engine.MinuteSimulationClock`.
This mixes the clock with a live graph.
Notes
-----
This seemingly awkward approach allows us to run the program using a single
thread. This is important because Matplotlib does not play nice with
multi-threaded environments. Zipline probably does not either.
Matplotlib has a pause() method which is a wrapper around time.sleep()
used in the SimpleClock. The key difference is that users
can still interact with the chart during the pause cycles. This is
what enables us to keep a single thread. This is also why we are not using
the 'animate' callback of Matplotlib. We need to direct access to the
__iter__ method in order to yield events to Zipline.
    The ``time_skew`` parameter represents the time difference between
the exchange and the live trading machine's clock. It's not used currently.
"""
def __init__(self, sessions, context, callback=None,
time_skew=pd.Timedelta('0s'), start=None, end=None):
self.sessions = sessions
self.time_skew = time_skew
self._last_emit = None
self._before_trading_start_bar_yielded = True
self.context = context
self.callback = callback
self.start = start
self.end = end
def __iter__(self):
from matplotlib import pyplot as plt
self.handle_late_start()
yield pd.Timestamp.utcnow(), SESSION_START
while True:
current_time = pd.Timestamp.utcnow()
current_minute = current_time.floor('1T')
if self.end is not None and current_minute >= self.end:
break
if self._last_emit is None or current_minute > self._last_emit:
log.debug('emitting minutely bar: {}'.format(current_minute))
self._last_emit = current_minute
yield current_minute, BAR
recorded_cols = list(self.context.recorded_vars.keys())
df, _ = prepare_stats(
self.context.frame_stats, recorded_cols=recorded_cols
)
self.callback(self.context, df)
else:
# I can't use the "animate" reactive approach here because
# I need to yield from the main loop.
# Workaround: https://stackoverflow.com/a/33050617/814633
plt.pause(1)
yield current_minute, SESSION_END
def handle_late_start(self):
if self.start:
time_diff = (self.start - pd.Timestamp.utcnow())
log.info(
'The algorithm is waiting for the specified '
'start date: {}'.format(self.start))
sleep(time_diff.seconds)
while pd.Timestamp.utcnow() < self.start:
pass
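# Illustrative sketch (not part of the original module): the clock is meant to
# be consumed as an iterator by the trade simulation loop.  The `sessions`,
# `context` and `callback` objects are assumed to be supplied by the
# surrounding catalyst infrastructure; this is only a usage outline.
def _example_consume_clock(sessions, context, callback):
    clock = LiveGraphClock(sessions, context, callback=callback)
    for dt, event in clock:
        # SESSION_START and BAR events would be dispatched to the
        # algorithm here; stop once the session is over.
        if event == SESSION_END:
            break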
| apache-2.0 |
i-sultan/Smart-Trader | src/st_helpers.py | 1 | 8647 | """ File for helper functions used by more than one class in ST system.
"""
#.-------------------.
#| imports |
#'-------------------'
import pandas as pd
import numpy as np
import pandas_datareader.data as web
import matplotlib.pyplot as plt
from cycler import cycler
from pandas.tools.plotting import table
#.---------------------.
#| exception classes |
#'---------------------'
class Error(Exception):
"""Base class for exceptions."""
pass
class ValidationError(Error):
"""Exception raised for errors in the input.
Attributes:
- message (str): explanation of the error.
"""
def __init__(self, message):
self.message = message
#.---------------.
#| classes |
#'---------------'
class PlotTypes:
Plot, Hist, Scatter, Table = range(4)
class PlotInfo(object):
"""Class that contains all info needed for plotting"""
def __init__(self, df, title, xlabel, ylabel, type, line_styles = None):
self.df = df
self.title = title
self.xlabel = xlabel
self.ylabel = ylabel
self.type = type
self.line_styles = line_styles
class TableInfo(object):
"""Class that contains all info needed for table printing"""
def __init__(self, df, title):
self.df = df
self.title = title
self.type = PlotTypes.Table
class StColors(object):
orange = '#f5be2e'
bright_green = '#b7f731'
dark_grey = '#191919'
mid_grey = '#323232'
light_grey = '#c8c8c8'
#.-------------------------.
#| helper functions |
#'-------------------------'
def get_data_web(symbols, dates, force_spy = True) :
"""Read stock data (adjusted close) for given symbols from web """
df = pd.DataFrame(index=dates)
if force_spy and 'SPY' not in symbols: # add SPY for reference, if absent
symbols.insert(0, 'SPY')
for symbol in symbols:
append_days = 21 #add enough days to cover first na's of the moving window
df_temp = web.DataReader(symbol, data_source='yahoo', start=dates[0]-append_days, end=dates[-1])
df_temp = df_temp.rename(columns={'Adj Close': symbol})
if len(symbols) == 1:
#indicators: Daily, SMA(%), EMA(%), SMA Momentum, Volatility, Volume
df = df_temp[[symbol, 'Volume', 'Open', 'Close', 'Low', 'High']]
pd.options.mode.chained_assignment = None #removes unnecessary warning
df['Volume'] = df['Volume'].pct_change(periods=1)
df['Open'] = (df['Open']-df['Close'])/df['Close']
df['High'] = (df['High']-df['Close'])/df['Close']
df['Low'] = (df['Low' ]-df['Close'])/df['Close']
df['SMA'] = df[symbol].pct_change(periods=1).rolling(window = 10).mean()
df['EWMA']= df[symbol].pct_change(periods=1).ewm(span = 10).mean()
df['MOM'] = df[symbol].pct_change(periods=10).rolling(window = 10).mean()
df['STD'] = df[symbol].pct_change(periods=1).rolling(window = 10).std()
else:
df = df.join(df_temp[[symbol]])
df = df.ix[append_days:,:]
if force_spy and symbol == 'SPY': # drop dates SPY did not trade
df = df.dropna(subset=['SPY'])
if not force_spy:
df = df.dropna(subset=[df.columns[0]])
return df
def get_tableau_colors():
tableau20 = [(31, 119, 180) , (174, 199, 232), (255, 127, 14) , (255, 187, 120), (44 , 160, 44 ),
(152, 223, 138), (214, 39, 40) , (255, 152, 150), (148, 103, 189), (197, 176, 213),
(140, 86, 75) , (196, 156, 148), (227, 119, 194), (247, 182, 210), (127, 127, 127),
(199, 199, 199), (188, 189, 34) , (219, 219, 141), (23, 190, 207) , (158, 218, 229)]
return [(r/255., g/255., b/255.) for (r,g,b) in tableau20]
def plot_data(fig, plot_info):
"""Plot with specified title and axis labels."""
ax = fig.add_subplot(111)
#configure surrounding area (edges, ticks, colors)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.set_prop_cycle(cycler('color', get_tableau_colors()))
#plot data
df = plot_info.df
if plot_info.type == PlotTypes.Plot:
if plot_info.line_styles:
df.plot(ax=ax, fontsize=12, lw=2.5, style = plot_info.line_styles)
else:
df.plot(ax=ax, fontsize=12, lw=2.5)
elif plot_info.type == PlotTypes.Hist:
df.plot.hist(ax=ax, fontsize=12, bins = 25, alpha = .8, lw=1)
elif plot_info.type == PlotTypes.Scatter:
df.plot.scatter(x = df.columns[0], y = df.columns[1], ax=ax, fontsize=12, lw=.5)
#plot linear fit on top of scatter plot, and display alpha\beta
beta, alpha = np.polyfit(df.ix[:,0], df.ix[:,1], 1)
ax.plot(df.ix[:,0], beta * df.ix[:,0] + alpha, '-', lw = 3, c = (1, 0.5, 0.17))
text = r'$\alpha = $' + '%.2f\n'%alpha + r'$\beta = $' + '%.2f'%beta
props = dict(boxstyle='round', facecolor=(1,0.5,0.17), alpha=.7)
ax.text(0.05, 0.95, text, transform=ax.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
#configure interior (legend, grid)
if plot_info.type != PlotTypes.Scatter:
ax.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=1)
ax.xaxis.grid(True, linestyle='--', alpha =.1, lw=1)
ax.yaxis.grid(True, linestyle='--', alpha =.2, lw=1)
#set title and x,y labels
fig.suptitle(plot_info.title, fontsize=20, fontweight='bold')
ax.set_xlabel(plot_info.xlabel, fontsize=16, fontweight='bold')
ax.set_ylabel(plot_info.ylabel, fontsize=16, fontweight='bold')
#setup padding
fig.tight_layout()
fig.subplots_adjust(top=0.9) #set axes area percentage of the figure
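# Illustrative sketch (not part of the original module): building a PlotInfo
# for a tiny made-up price series and rendering it with plot_data().
def _example_plot_data():
    df = pd.DataFrame({'SPY': [100.0, 101.0, 102.5]},
                      index=pd.date_range('2016-01-04', periods=3))
    info = PlotInfo(df, title='Example', xlabel='Date', ylabel='Price',
                    type=PlotTypes.Plot)
    fig = plt.figure()
    plot_data(fig, info)
    return fig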
def tabulate_data(fig, table_info):
"""Prepare a matplotlib table using provided table info and adding result to figure."""
fig.suptitle(table_info.title, fontsize=20, fontweight='bold')
ax = fig.add_subplot(111)
#configure table colors
tableau20 = get_tableau_colors()
color_1 = tableau20[0]
color_2 = tableau20[1]
#setup table at the middle of the figure
df = table_info.df
df.index = ' ' + df.index + ' ' #adding spaces to index(label) column since label column is fixed width
nrows, ncols = df.shape
colwidth = 0.16
rowheight = 0.1
tab = table(ax, np.round(df, 2), loc='upper center', bbox=[.5-ncols*colwidth/2,.5-nrows*rowheight/2,ncols*colwidth,nrows*rowheight])
for key, cell in tab.get_celld().items():
#set cell properties
cell._text.set_size(14)
cell.set_edgecolor('w')
cell.set_linestyle('-')
cell.set_facecolor('w')
cell.set_linewidth(1)
#change color of even rows vs. odd rows
row, col = key
if row%2 == 0:
cell.set_facecolor(color_1)
cell._text.set_color('w')
else:
cell.set_facecolor(color_2)
cell._text.set_color([i*0.65 for i in color_1])
#set color for header and index column
if row == 0 or col == -1:
cell._text.set_color('w')
cell._text.set_weight('bold')
cell.set_facecolor([i*0.65 for i in color_1])
if row == 0:
cell.set_height(cell.get_height()*1.4) #makes first row a bit taller
ax.axis('off')
def daily_percent_returns(df):
daily_percent_returns = df.pct_change(periods=1)*100
daily_percent_returns.ix[0,:] = 0
return daily_percent_returns
def daily_returns(df):
daily_returns = df-df.shift(1)
daily_returns.ix[0,:] = 0
return daily_returns
def cumulative_percent_returns(df):
cumulative_returns = (df/df.ix[0]-1)*100
return cumulative_returns
def calc_sharpe(df, risk_free_rate = 0): #annual risk_free_rate of 5% should be given as 0.05
df = df.pct_change() - risk_free_rate/252
sharpe_ratio = np.sqrt(252) * df.mean() / df.std()
sharpe_ratio.name = 'Sharpe ratio'
return sharpe_ratio
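# Illustrative sketch (not part of the original module): Sharpe ratio of a
# made-up price frame with a 5% annual risk-free rate.
def _example_calc_sharpe():
    prices = pd.DataFrame({'SPY': [100.0, 101.0, 100.5, 102.0, 103.0]})
    return calc_sharpe(prices, risk_free_rate=0.05)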
def calc_errors(error_list, relative_error_list, hit_list):
""" Calculate different types of errors (MSE, RMSE, MAE, MAPE, hit_rate)
        based on lists of errors and relative errors
"""
absolute_error_list = np.abs(error_list)
MSE = np.mean(absolute_error_list**2)
RMSE= np.sqrt(MSE)
MAE = np.mean(absolute_error_list)
MAPE= np.mean(np.abs(relative_error_list))
hit_rate = float(sum(hit_list))/len(hit_list)
return (MSE, RMSE, MAE, MAPE, hit_rate) | gpl-3.0 |
lmartinet/ISTEX_MentalRotation | LDA.py | 2 | 7670 | # -*- coding: utf-8 -*-
#
# This file is part of Istex_Mental_Rotation.
# Copyright (C) 2016 3ST ERIC Laboratory.
#
# This is a free software; you can redistribute it and/or modify it
# under the terms of the Revised BSD License; see LICENSE file for
# more details.
# Load the SVD representation of documents computed for the whole corpus of ISTEX and UCBL.
# Classify the documents into clusters using the LatentDirichletAllocation method, trying different numbers of clusters.
# Extract the key words representing each cluster.
# co-author : Lucie Martinet <[email protected]>
# co-author : Hussein AL-NATSHEH <[email protected].>
# Affiliation: University of Lyon, ERIC Laboratory, Lyon2
# Thanks to ISTEX project for the foundings
import os, argparse, pickle, json
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.decomposition import TruncatedSVD
import IPython
from sklearn.decomposition import LatentDirichletAllocation
import numpy as np
import sys
reload(sys)
sys.setdefaultencoding('utf8')
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" | ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
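# Illustrative sketch (not part of the original script): fit a small LDA model
# and inspect its topics with print_top_words().  The tf_idf_bow matrix and
# tf_feature_names list are assumed to come from a fitted vectorizer, as built
# in the __main__ block below.
def _example_print_top_words(tf_idf_bow, tf_feature_names, n_topics=5):
    lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
                                    learning_method='online',
                                    learning_offset=50., random_state=0)
    lda.fit(tf_idf_bow)
    print_top_words(lda, tf_feature_names, 10)
    return lda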
def write_top_words(model, feature_names, n_top_words, outfile):
for topic_idx, topic in enumerate(model.components_):
outfile.write("Topic #%d:" % topic_idx)
outfile.write("\n")
outfile.write(" | ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
outfile.write("\n")
outfile.write("\n")
def KeysValuesInit(nb_articles) :
keys = np.array(range(nb_articles),dtype=np.object)
values = np.array(range(nb_articles),dtype=np.object)
for i, (key,value) in enumerate(input_dict.items()) :
keys[i] = key
values[i] = value
return keys, values
def statisticsClusterSelection(cluster, document_id, docs_topic, selection, stat_selection, outfile_pointer):
if selection in document_id and outfile_pointer != None : # keys[t] is a string, the name of the document
max_index = np.argmax(docs_topic[cluster], axis=0) #docs_topic[t]: dictionary of the clusters with the likelihood to belong to this cluster
outfile_pointer.write(str(document_id) + " best cluster : " + str(max_index) + " likelihood: " + str(docs_topic[cluster][max_index])) # find the index of one list, with a numpy array format
if max_index not in stat_selection :
stat_selection[max_index] = 0
stat_selection[max_index] += 1
outfile_pointer.write("\n")
return stat_selection
# Compute the clusters of documents and write the results to the output files.
# Needs the tf-idf matrix, the feature names and the output file handles.
def statisticsClusters(nb_cluster, tf_idf_bow, tf_feature_names, ucbl_output=None, istex_output=None, max_iter=5, learning_method='online', learning_offset=50., random_state=0):
lda = LatentDirichletAllocation(n_topics=nb_cluster, max_iter=max_iter, learning_method=learning_method, learning_offset=learning_offset, random_state=random_state)
lda.fit(tf_idf_bow)
docs_topic = lda.transform(tf_idf_bow)
list_ucbl = dict()
list_mristex = dict()
for t in range(len(docs_topic)) :
list_ucbl = statisticsClusterSelection(t, keys[t], docs_topic, "UCBL", list_ucbl, ucbl_output)
list_mristex = statisticsClusterSelection(t, keys[t], docs_topic, "MRISTEX", list_mristex, istex_output)
generic.write("Total number of topics: "+str(nb_cluster))
generic.write("\nNumber of topics for ucbl: "+str(len(list_ucbl)))
generic.write("\nNumber of topics for istex: "+str(len(list_mristex)))
    ucbl_output.write("\nNumber of topics: "+str(len(list_ucbl))+"\n")
    ucbl_output.write("Total number of topics: "+str(nb_cluster)+"\n\n")
    istex_output.write("Number of topics: "+str(len(list_mristex))+"\n\n")
generic.write("\nTop words\n")
write_top_words(lda, tf_feature_names, 15, generic)
generic.write("End top words")
vocab = tf_idf_vectorizer.get_feature_names()
generic.write('size of the vocabulary:'+str(len(vocab)))
generic.write("\nUCBL in topics :\n")
for t in list_ucbl :
generic.write("Cluster " + str(t) + " UCBL Nb : " + str(list_ucbl[t]) + "\n")
generic.write("\nMR ISTEX in topics :\n")
for t in list_mristex :
generic.write("Cluster " + str(t) + " MR ISTEX Nb : " + str(list_mristex[t]) + "\n")
generic.write("\n\n")
print "Nb clusters ", i, " Nb ucbl clusters " , len(list_ucbl.values()) , min(list_ucbl.values()), " Nb istex cluster ",len(list_mristex), min(list_mristex.values()) # how many documents in the cluster containing less ucbl documents
if __name__ == "__main__" :
parser = argparse.ArgumentParser()
parser.add_argument("--input_file", default='results.pickle', type=str) # is a .json file
parser.add_argument("--output_file", default='results/results_lda.txt', type=str) # is a .json file
parser.add_argument("--lemmatizer", default=0, type=int) # for using lemmatization_tokenizer
parser.add_argument("--mx_ngram", default=2, type=int) # the upper bound of the ngram range
parser.add_argument("--mn_ngram", default=1, type=int) # the lower bound of the ngram range
parser.add_argument("--stop_words", default=1, type=int) # filtering out English stop-words
parser.add_argument("--vec_size", default=100, type=int) # the size of the vector in the semantics space
parser.add_argument("--min_count", default=1, type=int) # minimum frequency of the token to be included in the vocabulary
parser.add_argument("--max_df", default=0.95, type=float) # how much vocabulary percent to keep at max based on frequency
parser.add_argument("--debug", default=0, type=int) # embed IPython to use the decomposed matrix while running
parser.add_argument("--compress", default="json", type=str) # for dumping resulted files
parser.add_argument("--out_dir", default="results", type=str) # name of the output directory
parser.add_argument("--min_nb_clusters", default=2, type=int) # minimum number of cluster we try
parser.add_argument("--max_nb_clusters", default=10, type=int) # maximum number of cluster we try
args = parser.parse_args()
input_file = args.input_file
output_file = args.output_file
out_dir = args.out_dir
lemmatizer = args.lemmatizer
min_nb_clusters = args.min_nb_clusters
max_nb_clusters = args.max_nb_clusters
if lemmatizer:
lemmatizer = Lemmatizer()
else:
lemmatizer = None
mx_ngram = args.mx_ngram
mn_ngram = args.mn_ngram
stop_words = args.stop_words
if stop_words:
stop_words = 'english'
else:
stop_words = None
n_components = args.vec_size
min_count = args.min_count
max_df = args.max_df
debug = args.debug
compress = args.compress
out_dir = args.out_dir
# instead of recomputing the vectors, we should use the one of the complete experiment, so use pickle load
f = open(input_file, "r")
input_dict = pickle.load(f)
nb_articles = len(input_dict)
f.close()
keys, values = KeysValuesInit(nb_articles)
tf_idf_vectorizer = TfidfVectorizer(input='content', analyzer='word', stop_words=stop_words, tokenizer=lemmatizer,
min_df=min_count, ngram_range=(mn_ngram, mx_ngram), max_df=max_df)
tf_idf_bow = tf_idf_vectorizer.fit_transform(values)
tf_feature_names = tf_idf_vectorizer.get_feature_names()
generic = open(output_file, "w")
ucbl_out = open(os.path.join(out_dir, "lda_ucbl_cluster.txt"), "w")
istex_out = open(os.path.join(out_dir, "lda_mristex_cluster.txt"), "w")
for i in range(min_nb_clusters, max_nb_clusters) :
        statisticsClusters(i, tf_idf_bow, tf_feature_names, ucbl_out, istex_out, max_iter=5, learning_method='online', learning_offset=50., random_state=0)
generic.close()
ucbl_out.close()
istex_out.close()
| bsd-3-clause |
kevin-intel/scikit-learn | sklearn/experimental/enable_halving_search_cv.py | 11 | 1226 | """Enables Successive Halving search-estimators
The API and results of these estimators might change without any deprecation
cycle.
Importing this file dynamically sets the
:class:`~sklearn.model_selection.HalvingRandomSearchCV` and
:class:`~sklearn.model_selection.HalvingGridSearchCV` as attributes of the
`model_selection` module::
>>> # explicitly require this experimental feature
>>> from sklearn.experimental import enable_halving_search_cv # noqa
>>> # now you can import normally from model_selection
>>> from sklearn.model_selection import HalvingRandomSearchCV
>>> from sklearn.model_selection import HalvingGridSearchCV
The ``# noqa`` comment can be removed: it just tells linters like
flake8 to ignore the import, which appears as unused.
"""
from ..model_selection._search_successive_halving import (
HalvingRandomSearchCV,
HalvingGridSearchCV
)
from .. import model_selection
# use settattr to avoid mypy errors when monkeypatching
setattr(model_selection, "HalvingRandomSearchCV",
HalvingRandomSearchCV)
setattr(model_selection, "HalvingGridSearchCV",
HalvingGridSearchCV)
model_selection.__all__ += ['HalvingRandomSearchCV', 'HalvingGridSearchCV']
| bsd-3-clause |
more1/ThinkStats2 | code/survival.py | 65 | 17881 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import numpy as np
import pandas
import nsfg
import thinkstats2
import thinkplot
"""
Outcome codes from http://www.icpsr.umich.edu/nsfg6/Controller?
displayPage=labelDetails&fileCode=PREG&section=&subSec=8016&srtLabel=611932
1 LIVE BIRTH 9148
2 INDUCED ABORTION 1862
3 STILLBIRTH 120
4 MISCARRIAGE 1921
5 ECTOPIC PREGNANCY 190
6 CURRENT PREGNANCY 352
"""
FORMATS = ['pdf', 'eps', 'png']
class SurvivalFunction(object):
"""Represents a survival function."""
def __init__(self, cdf, label=''):
self.cdf = cdf
self.label = label or cdf.label
@property
def ts(self):
return self.cdf.xs
@property
def ss(self):
return 1 - self.cdf.ps
def __getitem__(self, t):
return self.Prob(t)
def Prob(self, t):
"""Returns S(t), the probability that corresponds to value t.
t: time
returns: float probability
"""
return 1 - self.cdf.Prob(t)
def Probs(self, xs):
"""Gets probabilities for a sequence of values."""
return [self.Prob(x) for x in xs]
def Mean(self):
"""Mean survival time."""
return self.cdf.Mean()
def Items(self):
"""Sorted list of (t, s) pairs."""
return zip(self.ts, self.ss)
def Render(self):
"""Generates a sequence of points suitable for plotting.
returns: tuple of (sorted times, survival function)
"""
return self.ts, self.ss
def MakeHazard(self, label=''):
"""Computes the hazard function.
sf: survival function
returns: Pmf that maps times to hazard rates
"""
ss = self.ss
lams = {}
for i, t in enumerate(self.ts[:-1]):
hazard = (ss[i] - ss[i+1]) / ss[i]
lams[t] = hazard
return HazardFunction(lams, label=label)
def MakePmf(self, filler=None):
"""Makes a PMF of lifetimes.
filler: value to replace missing values
returns: Pmf
"""
pmf = thinkstats2.Pmf()
for val, prob in self.cdf.Items():
pmf.Set(val, prob)
cutoff = self.cdf.ps[-1]
if filler is not None:
pmf[filler] = 1-cutoff
return pmf
def RemainingLifetime(self, filler=None, func=thinkstats2.Pmf.Mean):
"""Computes remaining lifetime as a function of age.
        func: function from conditional Pmf to expected lifetime
returns: Series that maps from age to remaining lifetime
"""
pmf = self.MakePmf(filler=filler)
d = {}
for t in sorted(pmf.Values())[:-1]:
pmf[t] = 0
pmf.Normalize()
d[t] = func(pmf) - t
#print(t, d[t])
return pandas.Series(d)
class HazardFunction(object):
"""Represents a hazard function."""
def __init__(self, d, label=''):
"""Initialize the hazard function.
d: dictionary (or anything that can initialize a series)
label: string
"""
self.series = pandas.Series(d)
self.label = label
def __getitem__(self, t):
return self.series[t]
def Render(self):
"""Generates a sequence of points suitable for plotting.
returns: tuple of (sorted times, hazard function)
"""
return self.series.index, self.series.values
def MakeSurvival(self, label=''):
"""Makes the survival function.
returns: SurvivalFunction
"""
ts = self.series.index
ss = (1 - self.series).cumprod()
cdf = thinkstats2.Cdf(ts, 1-ss)
sf = SurvivalFunction(cdf, label=label)
return sf
def Extend(self, other):
"""Extends this hazard function by copying the tail from another.
other: HazardFunction
"""
last = self.series.index[-1]
more = other.series[other.series.index > last]
self.series = pandas.concat([self.series, more])
def ConditionalSurvival(pmf, t0):
"""Computes conditional survival function.
Probability that duration exceeds t0+t, given that
duration >= t0.
pmf: Pmf of durations
t0: minimum time
returns: tuple of (ts, conditional survivals)
"""
cond = thinkstats2.Pmf()
for t, p in pmf.Items():
if t >= t0:
cond.Set(t-t0, p)
return SurvivalFunction(thinkstats2.Cdf(cond))
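# Illustrative sketch (not part of the original module): conditional survival
# for a made-up Pmf of pregnancy lengths, conditioned on reaching week 39.
def _example_conditional_survival():
    pmf = thinkstats2.Pmf([36, 38, 39, 39, 40, 40, 40, 41, 42, 43])
    return ConditionalSurvival(pmf, 39)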
def PlotConditionalSurvival(durations):
"""Plots conditional survival curves for a range of t0.
durations: list of durations
"""
pmf = thinkstats2.Pmf(durations)
times = [8, 16, 24, 32]
thinkplot.PrePlot(len(times))
for t0 in times:
sf = ConditionalSurvival(pmf, t0)
label = 't0=%d' % t0
thinkplot.Plot(sf, label=label)
thinkplot.Show()
def PlotSurvival(complete):
"""Plots survival and hazard curves.
complete: list of complete lifetimes
"""
thinkplot.PrePlot(3, rows=2)
cdf = thinkstats2.Cdf(complete, label='cdf')
sf = SurvivalFunction(cdf, label='survival')
print(cdf[13])
print(sf[13])
thinkplot.Plot(sf)
thinkplot.Cdf(cdf, alpha=0.2)
thinkplot.Config()
thinkplot.SubPlot(2)
hf = sf.MakeHazard(label='hazard')
print(hf[39])
thinkplot.Plot(hf)
thinkplot.Config(ylim=[0, 0.75])
def PlotHazard(complete, ongoing):
"""Plots the hazard function and survival function.
complete: list of complete lifetimes
ongoing: list of ongoing lifetimes
"""
# plot S(t) based on only complete pregnancies
cdf = thinkstats2.Cdf(complete)
sf = SurvivalFunction(cdf)
thinkplot.Plot(sf, label='old S(t)', alpha=0.1)
thinkplot.PrePlot(2)
# plot the hazard function
hf = EstimateHazardFunction(complete, ongoing)
thinkplot.Plot(hf, label='lams(t)', alpha=0.5)
# plot the survival function
sf = hf.MakeSurvival()
thinkplot.Plot(sf, label='S(t)')
thinkplot.Show(xlabel='t (weeks)')
def EstimateHazardFunction(complete, ongoing, label='', shift=1e-7):
"""Estimates the hazard function by Kaplan-Meier.
http://en.wikipedia.org/wiki/Kaplan%E2%80%93Meier_estimator
complete: list of complete lifetimes
ongoing: list of ongoing lifetimes
label: string
shift: presumed additional survival of ongoing
"""
# pmf and sf of complete lifetimes
n = len(complete)
hist_complete = thinkstats2.Hist(complete)
sf_complete = SurvivalFunction(thinkstats2.Cdf(complete))
# sf for ongoing lifetimes
# The shift is a regrettable hack needed to deal with simultaneity.
# If a case is complete at some t and another case is ongoing
# at t, we presume that the ongoing case exceeds t+shift.
m = len(ongoing)
cdf = thinkstats2.Cdf(ongoing).Shift(shift)
sf_ongoing = SurvivalFunction(cdf)
lams = {}
for t, ended in sorted(hist_complete.Items()):
at_risk = ended + n * sf_complete[t] + m * sf_ongoing[t]
lams[t] = ended / at_risk
#print(t, ended, n * sf_complete[t], m * sf_ongoing[t], at_risk)
return HazardFunction(lams, label=label)
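# Illustrative sketch (not part of the original module): Kaplan-Meier
# estimation on made-up data, where `complete` holds fully observed durations
# and `ongoing` holds censored ones.
def _example_estimate_hazard_function():
    complete = [1, 2, 2, 3, 5, 5, 6]
    ongoing = [2, 4, 6]
    hf = EstimateHazardFunction(complete, ongoing, label='toy hazard')
    sf = hf.MakeSurvival(label='toy survival')
    return hf, sf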
def CleanData(resp):
"""Cleans a respondent DataFrame.
resp: DataFrame of respondents
"""
resp.cmmarrhx.replace([9997, 9998, 9999], np.nan, inplace=True)
resp['agemarry'] = (resp.cmmarrhx - resp.cmbirth) / 12.0
resp['age'] = (resp.cmintvw - resp.cmbirth) / 12.0
month0 = pandas.to_datetime('1899-12-15')
dates = [month0 + pandas.DateOffset(months=cm)
for cm in resp.cmbirth]
resp['decade'] = (pandas.DatetimeIndex(dates).year - 1900) // 10
def AddLabelsByDecade(groups, **options):
"""Draws fake points in order to add labels to the legend.
groups: GroupBy object
"""
thinkplot.PrePlot(len(groups))
for name, _ in groups:
label = '%d0s' % name
thinkplot.Plot([15], [1], label=label, **options)
def EstimateSurvivalByDecade(groups, **options):
"""Groups respondents by decade and plots survival curves.
groups: GroupBy object
"""
thinkplot.PrePlot(len(groups))
for _, group in groups:
_, sf = EstimateSurvival(group)
thinkplot.Plot(sf, **options)
def PlotPredictionsByDecade(groups, **options):
"""Groups respondents by decade and plots survival curves.
groups: GroupBy object
"""
hfs = []
for _, group in groups:
hf, sf = EstimateSurvival(group)
hfs.append(hf)
thinkplot.PrePlot(len(hfs))
for i, hf in enumerate(hfs):
if i > 0:
hf.Extend(hfs[i-1])
sf = hf.MakeSurvival()
thinkplot.Plot(sf, **options)
def ResampleSurvival(resp, iters=101):
"""Resamples respondents and estimates the survival function.
resp: DataFrame of respondents
iters: number of resamples
"""
_, sf = EstimateSurvival(resp)
thinkplot.Plot(sf)
low, high = resp.agemarry.min(), resp.agemarry.max()
ts = np.arange(low, high, 1/12.0)
ss_seq = []
for _ in range(iters):
sample = thinkstats2.ResampleRowsWeighted(resp)
_, sf = EstimateSurvival(sample)
ss_seq.append(sf.Probs(ts))
low, high = thinkstats2.PercentileRows(ss_seq, [5, 95])
thinkplot.FillBetween(ts, low, high, color='gray', label='90% CI')
thinkplot.Save(root='survival3',
xlabel='age (years)',
ylabel='prob unmarried',
xlim=[12, 46],
ylim=[0, 1],
formats=FORMATS)
def EstimateSurvival(resp):
"""Estimates the survival curve.
resp: DataFrame of respondents
returns: pair of HazardFunction, SurvivalFunction
"""
complete = resp[resp.evrmarry == 1].agemarry
ongoing = resp[resp.evrmarry == 0].age
hf = EstimateHazardFunction(complete, ongoing)
sf = hf.MakeSurvival()
return hf, sf
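# Illustrative sketch (not part of the original module): EstimateSurvival on a
# tiny hand-made respondent frame with the columns it expects (evrmarry,
# agemarry, age).  The values are made up for demonstration only.
def _example_estimate_survival():
    resp = pandas.DataFrame({'evrmarry': [1, 1, 0, 0, 1],
                             'agemarry': [22.5, 27.0, np.nan, np.nan, 31.0],
                             'age': [35.0, 40.0, 28.0, 33.0, 45.0]})
    return EstimateSurvival(resp)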
def PlotMarriageData(resp):
"""Plots hazard and survival functions.
resp: DataFrame of respondents
"""
hf, sf = EstimateSurvival(resp)
thinkplot.PrePlot(rows=2)
thinkplot.Plot(hf)
thinkplot.Config(legend=False)
thinkplot.SubPlot(2)
thinkplot.Plot(sf)
thinkplot.Save(root='survival2',
xlabel='age (years)',
ylabel='prob unmarried',
ylim=[0, 1],
legend=False,
formats=FORMATS)
return sf
def PlotPregnancyData(preg):
"""Plots survival and hazard curves based on pregnancy lengths.
    preg: DataFrame of pregnancies
"""
complete = preg.query('outcome in [1, 3, 4]').prglngth
print('Number of complete pregnancies', len(complete))
ongoing = preg[preg.outcome == 6].prglngth
print('Number of ongoing pregnancies', len(ongoing))
PlotSurvival(complete)
thinkplot.Save(root='survival1',
xlabel='t (weeks)',
formats=FORMATS)
hf = EstimateHazardFunction(complete, ongoing)
sf = hf.MakeSurvival()
return sf
def PlotRemainingLifetime(sf1, sf2):
"""Plots remaining lifetimes for pregnancy and age at first marriage.
sf1: SurvivalFunction for pregnancy length
sf2: SurvivalFunction for age at first marriage
"""
thinkplot.PrePlot(cols=2)
rem_life1 = sf1.RemainingLifetime()
thinkplot.Plot(rem_life1)
thinkplot.Config(title='pregnancy length',
xlabel='weeks',
ylabel='mean remaining weeks')
thinkplot.SubPlot(2)
func = lambda pmf: pmf.Percentile(50)
rem_life2 = sf2.RemainingLifetime(filler=np.inf, func=func)
thinkplot.Plot(rem_life2)
thinkplot.Config(title='age at first marriage',
ylim=[0, 15],
xlim=[11, 31],
xlabel='age (years)',
ylabel='median remaining years')
thinkplot.Save(root='survival6',
formats=FORMATS)
def ReadFemResp(dct_file='2002FemResp.dct',
dat_file='2002FemResp.dat.gz',
**options):
"""Reads the NSFG respondent data.
dct_file: string file name
dat_file: string file name
returns: DataFrame
"""
dct = thinkstats2.ReadStataDct(dct_file, encoding='iso-8859-1')
df = dct.ReadFixedWidth(dat_file, compression='gzip', **options)
CleanData(df)
return df
def ReadFemResp2002():
"""Reads respondent data from NSFG Cycle 6.
returns: DataFrame
"""
usecols = ['cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
'evrmarry', 'finalwgt']
resp = ReadFemResp(usecols=usecols)
CleanData(resp)
return resp
def ReadFemResp2010():
"""Reads respondent data from NSFG Cycle 7.
returns: DataFrame
"""
usecols = ['cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
'evrmarry', 'wgtq1q16']
resp = ReadFemResp('2006_2010_FemRespSetup.dct',
'2006_2010_FemResp.dat.gz',
usecols=usecols)
resp['finalwgt'] = resp.wgtq1q16
CleanData(resp)
return resp
def ReadFemResp2013():
"""Reads respondent data from NSFG Cycle 8.
returns: DataFrame
"""
usecols = ['cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
'evrmarry', 'wgt2011_2013']
resp = ReadFemResp('2011_2013_FemRespSetup.dct',
'2011_2013_FemRespData.dat.gz',
usecols=usecols)
resp['finalwgt'] = resp.wgt2011_2013
CleanData(resp)
return resp
def ReadFemResp1995():
"""Reads respondent data from NSFG Cycle 5.
returns: DataFrame
"""
dat_file = '1995FemRespData.dat.gz'
names = ['a_doi', 'timesmar', 'mardat01', 'bdaycenm', 'post_wt']
colspecs = [(12359, 12363),
(3538, 3540),
(11758, 11762),
(13, 16),
(12349, 12359)]
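    # Note: colspecs are zero-based, half-open (start, end) character ranges in the
    # fixed-width file, paired positionally with the column names above.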
df = pandas.read_fwf(dat_file,
compression='gzip',
colspecs=colspecs,
names=names)
df['cmmarrhx'] = df.mardat01
df['cmbirth'] = df.bdaycenm
df['cmintvw'] = df.a_doi
df['finalwgt'] = df.post_wt
df.timesmar.replace([98, 99], np.nan, inplace=True)
df['evrmarry'] = (df.timesmar > 0).astype(int)
CleanData(df)
return df
def ReadFemResp1982():
"""Reads respondent data from NSFG Cycle 4.
returns: DataFrame
"""
dat_file = '1982NSFGData.dat.gz'
names = ['cmmarrhx', 'MARNO', 'cmintvw', 'cmbirth', 'finalwgt']
#actual = ['MARIMO', 'MARNO', 'TL', 'TL', 'W5']
colspecs = [(1028, 1031),
(1258, 1259),
(841, 844),
(12, 15),
(976, 982)]
df = pandas.read_fwf(dat_file, compression='gzip', colspecs=colspecs, names=names)
df.MARNO.replace([98, 99], np.nan, inplace=True)
df['evrmarry'] = (df.MARNO > 0).astype(int)
CleanData(df)
return df[:7969]
def ReadFemResp1988():
"""Reads respondent data from NSFG Cycle 4.
returns: DataFrame
"""
dat_file = '1988FemRespData.dat.gz'
names = ['F_13'] #['CMOIMO', 'F_13', 'F19M1MO', 'A_3']
# colspecs = [(799, 803)],
colspecs = [(20, 22)]#,
# (1538, 1542),
# (26, 30),
# (2568, 2574)]
df = pandas.read_fwf(dat_file, compression='gzip', colspecs=colspecs, names=names)
# df['cmmarrhx'] = df.F19M1MO
# df['cmbirth'] = df.A_3
# df['cmintvw'] = df.CMOIMO
# df['finalwgt'] = df.W5
df.F_13.replace([98, 99], np.nan, inplace=True)
df['evrmarry'] = (df.F_13 > 0).astype(int)
# CleanData(df)
return df
def PlotResampledByDecade(resps, iters=11, predict_flag=False, omit=None):
"""Plots survival curves for resampled data.
resps: list of DataFrames
iters: number of resamples to plot
    predict_flag: whether to also plot predictions
    omit: list of decade labels to leave out of the plot
    """
for i in range(iters):
samples = [thinkstats2.ResampleRowsWeighted(resp)
for resp in resps]
sample = pandas.concat(samples, ignore_index=True)
groups = sample.groupby('decade')
if omit:
groups = [(name, group) for name, group in groups
if name not in omit]
# TODO: refactor this to collect resampled estimates and
# plot shaded areas
if i == 0:
AddLabelsByDecade(groups, alpha=0.7)
if predict_flag:
PlotPredictionsByDecade(groups, alpha=0.1)
EstimateSurvivalByDecade(groups, alpha=0.1)
else:
EstimateSurvivalByDecade(groups, alpha=0.2)
def main():
thinkstats2.RandomSeed(17)
preg = nsfg.ReadFemPreg()
sf1 = PlotPregnancyData(preg)
# make the plots based on Cycle 6
resp6 = ReadFemResp2002()
sf2 = PlotMarriageData(resp6)
ResampleSurvival(resp6)
PlotRemainingLifetime(sf1, sf2)
# read Cycles 5 and 7
resp5 = ReadFemResp1995()
resp7 = ReadFemResp2010()
# plot resampled survival functions by decade
resps = [resp5, resp6, resp7]
PlotResampledByDecade(resps)
thinkplot.Save(root='survival4',
xlabel='age (years)',
ylabel='prob unmarried',
xlim=[13, 45],
ylim=[0, 1],
formats=FORMATS)
# plot resampled survival functions by decade, with predictions
PlotResampledByDecade(resps, predict_flag=True, omit=[5])
thinkplot.Save(root='survival5',
xlabel='age (years)',
ylabel='prob unmarried',
xlim=[13, 45],
ylim=[0, 1],
formats=FORMATS)
if __name__ == '__main__':
main()
| gpl-3.0 |
SpatialMetabolomics/SM_distributed | sm/engine/mol_db.py | 2 | 4349 | from collections import OrderedDict
import pandas as pd
import logging
import requests
from sm.engine.db import DB
from sm.engine.util import SMConfig
logger = logging.getLogger('engine')
SF_INS = 'INSERT INTO sum_formula (db_id, sf) values (%s, %s)'
SF_COUNT = 'SELECT count(*) FROM sum_formula WHERE db_id = %s'
SF_SELECT = 'SELECT sf FROM sum_formula WHERE db_id = %s'
class MolDBServiceWrapper(object):
def __init__(self, service_url):
self._service_url = service_url
self._session = requests.Session()
def _fetch(self, url):
r = self._session.get(url)
r.raise_for_status()
return r.json()['data']
def fetch_all_dbs(self):
url = '{}/databases'.format(self._service_url)
return self._fetch(url)
def find_db_by_id(self, id):
url = '{}/databases/{}'.format(self._service_url, id)
return self._fetch(url)
def find_db_by_name_version(self, name, version=None):
url = '{}/databases?name={}'.format(self._service_url, name)
if version:
url += '&version={}'.format(version)
return self._fetch(url)
def fetch_db_sfs(self, db_id):
return self._fetch('{}/databases/{}/sfs'.format(self._service_url, db_id))
def fetch_molecules(self, db_id, sf=None):
if sf:
url = '{}/databases/{}/molecules?sf={}&fields=mol_id,mol_name'
return self._fetch(url.format(self._service_url, db_id, sf))
else:
# TODO: replace one large request with several smaller ones
url = '{}/databases/{}/molecules?fields=sf,mol_id,mol_name&limit=10000000'
return self._fetch(url.format(self._service_url, db_id))
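# Illustrative usage sketch (the URL and database name below are made-up
# placeholders, not values from this project's configuration):
#
#     wrapper = MolDBServiceWrapper('http://localhost:5001/v1')
#     db_doc = wrapper.find_db_by_name_version('HMDB', version='2016')[0]
#     formulas = wrapper.fetch_db_sfs(db_doc['id'])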
class MolecularDB(object):
""" A class representing a molecule database to search through.
Provides several data structures used in the engine to speed up computation
Args
----------
name: str
version: str
If None the latest version will be used
iso_gen_config : dict
Isotope generator configuration
mol_db_service : sm.engine.MolDBServiceWrapper
Molecular database ID/name resolver
db : DB
Database connector
"""
def __init__(self, id=None, name=None, version=None, iso_gen_config=None,
mol_db_service=None, db=None):
self._iso_gen_config = iso_gen_config
sm_config = SMConfig.get_conf()
self._mol_db_service = mol_db_service or MolDBServiceWrapper(sm_config['services']['mol_db'])
self._db = db
if id is not None:
data = self._mol_db_service.find_db_by_id(id)
elif name is not None:
data = self._mol_db_service.find_db_by_name_version(name, version)[0]
else:
raise Exception('MolDB id or name should be provided')
self._id, self._name, self._version = data['id'], data['name'], data['version']
self._sf_df = None
self._job_id = None
self._sfs = None
self._ion_centroids = None
def __str__(self):
return '{} {}'.format(self.name, self.version)
@property
def id(self):
return self._id
@property
def name(self):
return self._name
@property
def version(self):
return self._version
@property
def ion_centroids(self):
return self._ion_centroids
def set_ion_centroids(self, ion_centroids):
self._ion_centroids = ion_centroids
def set_job_id(self, job_id):
self._job_id = job_id
def get_molecules(self, sf=None):
""" Returns a dataframe with (mol_id, mol_name) or (sf, mol_id, mol_name) rows
Args
----------
sf: str
Returns
----------
pd.DataFrame
"""
return pd.DataFrame(self._mol_db_service.fetch_molecules(self.id, sf=sf))
@property
def sfs(self):
""" Total list of formulas """
if not self._sfs:
if self._db.select_one(SF_COUNT, params=(self._id,))[0] == 0:
sfs = self._mol_db_service.fetch_db_sfs(self.id)
rows = [(self._id, sf) for sf in sfs]
self._db.insert(SF_INS, rows)
self._sfs = [row[0] for row in self._db.select(SF_SELECT, params=(self._id,))]
return self._sfs
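# Note: the `sfs` property above fetches the formula list from the mol_db service
# only on first access, persists it into the local sum_formula table, and serves
# later accesses from that table. Illustrative usage sketch (constructor arguments
# are placeholders and depend on the deployment):
#
#     mol_db = MolecularDB(name='HMDB', iso_gen_config=iso_config, db=db)
#     formulas = mol_db.sfs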
| apache-2.0 |
pythonvietnam/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 23 | 27579 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg and"
" lbfgs solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
    # all solvers except 'newton-cg' and 'lbfgs'
for solver in ['liblinear']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
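# For reference, with labels y_i in {-1, +1} the penalized objective exercised
# above is assumed to be
#     L(w) = sum_i log(1 + exp(-y_i * w.x_i)) + 0.5 * alpha * ||w||^2,
# whose gradient is
#     grad L(w) = -sum_i y_i * x_i * sigmoid(-y_i * w.x_i) + alpha * w;
# the test compares this analytic gradient against optimize.approx_fprime.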
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
    # a feature vector of ones, i.e. a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
    # Use a pre-defined CV so the same folds are used for the different y below
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
    # Test that liblinear fails when a class_weight dict is
    # provided for a multiclass problem. However, it can handle
    # binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
    # Test that the path gives almost the same results. However, since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
def test_logreg_predict_proba_multinomial():
X, y = make_classification(
n_samples=10, n_features=20, random_state=0, n_classes=3, n_informative=10)
    # Predicted probabilities using the true-entropy loss should give a smaller loss
# than those using the ovr method.
clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
clf_multi.fit(X, y)
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
clf_ovr.fit(X, y)
clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
assert_greater(clf_ovr_loss, clf_multi_loss)
    # Predicted probabilities using the soft-max function should give a smaller loss
# than those using the logistic function.
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
assert_greater(clf_wrong_loss, clf_multi_loss)
| bsd-3-clause |
samuelstjean/dipy | dipy/viz/projections.py | 11 | 3883 | """
Visualization tools for 2D projections of 3D functions on the sphere, such as
ODFs.
"""
import numpy as np
import scipy.interpolate as interp
from ..utils.optpkg import optional_package
matplotlib, has_mpl, setup_module = optional_package("matplotlib")
plt, _, _ = optional_package("matplotlib.pyplot")
tri, _, _ = optional_package("matplotlib.tri")
bm, has_basemap, _ = optional_package("mpl_toolkits.basemap")
import dipy.core.geometry as geo
from dipy.testing import doctest_skip_parser
@doctest_skip_parser
def sph_project(vertices, val, ax=None, vmin=None, vmax=None, cmap=None,
cbar=True, tri=False, boundary=False, **basemap_args):
"""Draw a signal on a 2D projection of the sphere.
Parameters
----------
vertices : (N,3) ndarray
unit vector points of the sphere
val: (N) ndarray
Function values.
ax : mpl axis, optional
If specified, draw onto this existing axis instead.
vmin, vmax : floats
        Lower and upper bounds used to clip and scale the plotted values.
cmap : mpl colormap
cbar: Whether to add the color-bar to the figure
    tri : Whether to display the plot triangulated as a pseudo-color plot.
boundary : Whether to draw the boundary around the projection in a black line
Returns
-------
ax : axis
Matplotlib figure axis
Examples
--------
>>> from dipy.data import get_sphere
>>> verts = get_sphere('symmetric724').vertices
>>> ax = sph_project(verts.T, np.random.rand(len(verts.T))) # skip if not has_basemap
"""
if ax is None:
fig, ax = plt.subplots(1)
if cmap is None:
cmap = matplotlib.cm.hot
basemap_args.setdefault('projection', 'ortho')
basemap_args.setdefault('lat_0', 0)
basemap_args.setdefault('lon_0', 0)
basemap_args.setdefault('resolution', 'c')
from mpl_toolkits.basemap import Basemap
m = Basemap(**basemap_args)
if boundary:
m.drawmapboundary()
# Rotate the coordinate system so that you are looking from the north pole:
verts_rot = np.array(np.dot(np.matrix([[0,0,-1],[0,1,0],[1,0,0]]), vertices))
# To get the orthographic projection, when the first coordinate is positive:
neg_idx = np.where(verts_rot[0]>0)
# rotate the entire bvector around to point in the other direction:
verts_rot[:, neg_idx] *= -1
_, theta, phi = geo.cart2sphere(verts_rot[0], verts_rot[1], verts_rot[2])
lat, lon = geo.sph2latlon(theta, phi)
x, y = m(lon, lat)
my_min = np.nanmin(val)
if vmin is not None:
my_min = vmin
my_max = np.nanmax(val)
if vmax is not None:
my_max = vmax
if tri:
m.pcolor(x, y, val, vmin=my_min, vmax=my_max, tri=True, cmap=cmap)
else:
cmap_data = cmap._segmentdata
red_interp, blue_interp, green_interp = (
interp.interp1d(np.array(cmap_data[gun])[:,0],
np.array(cmap_data[gun])[:,1]) for gun in
['red', 'blue','green'])
r = (val - my_min)/float(my_max-my_min)
        # Enforce the maximum and minimum boundaries, if there are values
# outside those boundaries:
r[r<0]=0
r[r>1]=1
for this_x, this_y, this_r in zip(x,y,r):
red = red_interp(this_r)
blue = blue_interp(this_r)
green = green_interp(this_r)
m.plot(this_x, this_y, 'o',
c=[red.item(), green.item(), blue.item()])
if cbar:
mappable = matplotlib.cm.ScalarMappable(cmap=cmap)
mappable.set_array([my_min, my_max])
# setup colorbar axes instance.
pos = ax.get_position()
l, b, w, h = pos.bounds
# setup colorbar axes
cax = fig.add_axes([l+w+0.075, b, 0.05, h], frameon=False)
fig.colorbar(mappable, cax=cax) # draw colorbar
return ax
| bsd-3-clause |
procoder317/scikit-learn | benchmarks/bench_covertype.py | 120 | 7381 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier   train-time  test-time  error-rate
--------------------------------------------
liblinear     15.9744s    0.0705s     0.2305
GaussianNB     3.0666s    0.3884s     0.4841
SGD            1.0558s    0.1152s     0.2300
CART          79.4296s    0.0523s     0.0469
RandomForest 1190.1620s   0.5881s     0.0243
ExtraTrees   640.3194s    0.6495s     0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3),
'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
| bsd-3-clause |
abhishekgahlot/scikit-learn | examples/svm/plot_separating_hyperplane.py | 62 | 1274 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machines classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
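# For reference: points on the decision boundary satisfy w . x + b = 0, i.e.
# x1 = -(w[0] / w[1]) * x0 - intercept / w[1], which is the slope `a` and the
# line `yy` computed above; the dashed lines are the parallels through two
# support vectors, marking the margin on either side of the boundary.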
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
mjgrav2001/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
    # The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
tjcunliffe/mirage | stubo/ext/parse_date.py | 4 | 6447 | """
:copyright: (c) 2015 by OpenCredo.
:license: GPLv3, see LICENSE for more details.
"""
# adapted from https://github.com/pydata/pandas/blob/master/pandas/tseries/tools.py
from datetime import datetime, timedelta
import re
import sys
from StringIO import StringIO
import logging
import dateutil
from dateutil.parser import parse, DEFAULTPARSER
log = logging.getLogger(__name__)
# raise exception if dateutil 2.0 install on 2.x platform
if (sys.version_info[0] == 2 and
dateutil.__version__ == '2.0'): # pragma: no cover
raise Exception('dateutil 2.0 incompatible with Python 2.x, you must '
'install version 1.5 or 2.1+!')
# otherwise a 2nd import won't show the message
_DATEUTIL_LEXER_SPLIT = None
try:
# Since these are private methods from dateutil, it is safely imported
# here so in case this interface changes, pandas will just fallback
# to not using the functionality
from dateutil.parser import _timelex
if hasattr(_timelex, 'split'):
def _lexer_split_from_str(dt_str):
# The StringIO(str(_)) is for dateutil 2.2 compatibility
return _timelex.split(StringIO(str(dt_str)))
_DATEUTIL_LEXER_SPLIT = _lexer_split_from_str
except (ImportError, AttributeError):
pass
def _guess_datetime_format(dt_str, parsed_datetime, dayfirst,
dt_str_split=_DATEUTIL_LEXER_SPLIT):
"""
Guess the datetime format of a given datetime string.
Parameters
----------
dt_str : string, datetime string to guess the format of
parsed_datetime : result of dateutil.parser.parse
    dayfirst : boolean
If True parses dates with the day first, eg 20/01/2005
Warning: dayfirst=True is not strict, but will prefer to parse
with day first (this is a known bug).
dt_str_split : function, defaults to `_DATEUTIL_LEXER_SPLIT` (dateutil)
This function should take in a datetime string and return
a list of strings, the guess of the various specific parts
e.g. '2011/12/30' -> ['2011', '/', '12', '/', '30']
Returns
-------
ret : datetime format string (for `strftime` or `strptime`)
"""
log.debug('_guess_datetime_format, dt_str={0}'.format(dt_str))
if dt_str_split is None:
return None
if not isinstance(dt_str, basestring):
return None
day_attribute_and_format = (('day',), '%d')
datetime_attrs_to_format = [
(('year', 'month', 'day'), '%Y%m%d'),
(('year',), '%Y'),
(('month',), '%B'),
(('month',), '%b'),
(('month',), '%m'),
day_attribute_and_format,
(('hour',), '%H'),
(('minute',), '%M'),
(('second',), '%S'),
(('microsecond',), '%f'),
(('second', 'microsecond'), '%S.%f'),
]
if dayfirst:
datetime_attrs_to_format.remove(day_attribute_and_format)
datetime_attrs_to_format.insert(0, day_attribute_and_format)
if parsed_datetime is None:
return None
try:
log.debug('dt_str_split(dt_str)')
tokens = dt_str_split(dt_str)
except:
# In case the datetime string can't be split, its format cannot
# be guessed
return None
log.debug('split tokens={0}'.format(tokens))
format_guess = [None] * len(tokens)
found_attrs = set()
for attrs, attr_format in datetime_attrs_to_format:
# If a given attribute has been placed in the format string, skip
# over other formats for that same underlying attribute (IE, month
# can be represented in multiple different ways)
if set(attrs) & found_attrs:
continue
if all(getattr(parsed_datetime, attr) is not None for attr in attrs):
for i, token_format in enumerate(format_guess):
if (token_format is None and
tokens[i] == parsed_datetime.strftime(attr_format)):
format_guess[i] = attr_format
found_attrs.update(attrs)
break
log.debug('found_attrs={0}'.format(found_attrs))
log.debug('format_guess={0}'.format(format_guess))
# Only consider it a valid guess if we have a year, month and day
if len(set(['year', 'month', 'day']) & found_attrs) != 3:
return None
output_format = []
for i, guess in enumerate(format_guess):
if guess is not None:
# Either fill in the format placeholder (like %Y)
output_format.append(guess)
else:
            # Or just the token separator (i.e., the dashes in "01-01-2013")
try:
# If the token is numeric, then we likely didn't parse it
# properly, so our guess is wrong
if float(tokens[i]) != 0.0:
return None
except ValueError:
pass
output_format.append(tokens[i])
guessed_format = ''.join(output_format)
if parsed_datetime.strftime(guessed_format) == dt_str:
return guessed_format
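# Illustrative example (input value assumed): for dt_str '2011-12-30' and its
# dateutil parse result, the lexer splits the string into
# ['2011', '-', '12', '-', '30'] and the guessed format is '%Y-%m-%d'.
# None is returned when the guessed format does not reproduce dt_str exactly,
# or when year, month and day are not all present.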
has_time = re.compile('(.+)([\s]|T)+(.+)')
def parse_date_string(date_str, dayfirst=False, yearfirst=True):
"""
Try hard to parse datetime string, leveraging dateutil plus some extras
Parameters
----------
    date_str : date string to parse
dayfirst : bool,
yearfirst : bool
Returns
-------
datetime, datetime format string (for `strftime` or `strptime`)
    or None if the date string cannot be parsed
"""
if not isinstance(date_str, basestring):
return None
arg = date_str.upper()
parse_info = DEFAULTPARSER.info
if len(arg) in (7, 8):
mresult = _attempt_monthly(arg)
log.debug('mresult={0}'.format(mresult))
if mresult:
return mresult
parsed_datetime = DEFAULTPARSER.parse(StringIO(str(arg)), dayfirst=dayfirst,
yearfirst=yearfirst, fuzzy=True)
log.debug('parsed_datetime={0}'.format(parsed_datetime))
if parsed_datetime:
date_format = _guess_datetime_format(date_str, parsed_datetime,
dayfirst=dayfirst)
return parsed_datetime, date_format
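# Minimal usage sketch (example value assumed):
#
#     parsed, fmt = parse_date_string('2014-01-25')
#     # -> (datetime.datetime(2014, 1, 25, 0, 0), '%Y-%m-%d')
#
# Non-string input returns None; strings of length 7 or 8 are first tried
# against the monthly patterns handled by _attempt_monthly below.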
def _attempt_monthly(val):
pats = ['%Y-%m', '%m-%Y', '%b %Y', '%b-%Y']
for pat in pats:
try:
ret = datetime.strptime(val, pat)
return ret, pat
except Exception:
pass
| gpl-3.0 |
google/syzygy | third_party/numpy/files/numpy/lib/npyio.py | 16 | 61927 | __all__ = ['savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource']
import numpy as np
import format
import sys
import os
import sys
import itertools
import warnings
from operator import itemgetter
from cPickle import load as _cload, loads
from _datasource import DataSource
from _compiled_base import packbits, unpackbits
from _iotools import LineSplitter, NameValidator, StringConverter, \
ConverterError, ConverterLockError, ConversionWarning, \
_is_string_like, has_nested_fields, flatten_dtype, \
easy_dtype, _bytes_to_name
from numpy.compat import asbytes, asstr, asbytes_nested, bytes
if sys.version_info[0] >= 3:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
_string_like = _is_string_like
def seek_gzip_factory(f):
"""Use this factory to produce the class so that we can do a lazy
import on gzip.
"""
import gzip
class GzipFile(gzip.GzipFile):
def seek(self, offset, whence=0):
# figure out new position (we can only seek forwards)
if whence == 1:
offset = self.offset + offset
if whence not in [0, 1]:
raise IOError, "Illegal argument"
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
def tell(self):
return self.offset
if isinstance(f, str):
f = GzipFile(f)
elif isinstance(f, gzip.GzipFile):
# cast to our GzipFile if its already a gzip.GzipFile
try:
name = f.name
except AttributeError:
# Backward compatibility for <= 2.5
name = f.filename
mode = f.mode
f = GzipFile(fileobj=f.fileobj, filename=name)
f.mode = mode
return f
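# Note: this factory is used by the text readers in this module (e.g. loadtxt)
# to treat '.gz' files like ordinary seekable file objects. Since gzip streams
# cannot seek backwards cheaply, the seek() above rewinds and re-reads the stream
# for a negative seek, and only whence values 0 and 1 are supported.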
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
self._obj = obj
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError, key
def zipfile_factory(*args, **kwargs):
import zipfile
if sys.version_info >= (2, 5):
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ".npy" extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ".npy" extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ".npy" extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
        An object on which attribute look-up can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.read(key)
if bytes.startswith(format.MAGIC_PREFIX):
value = BytesIO(bytes)
return format.read_array(value)
else:
return bytes
else:
raise KeyError, "%s is not a file in the archive" % key
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ".npy" extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
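# A minimal usage sketch of NpzFile's dictionary-like interface: member names
# (without the ".npy" suffix) are listed in ``files`` and are reachable both
# by getitem access and through the ``f`` BagObj shortcut.
def _example_npzfile():
    import numpy as np
    from tempfile import TemporaryFile
    outfile = TemporaryFile()
    np.savez(outfile, a=np.arange(3), b=np.zeros((2, 2)))
    outfile.seek(0)
    npz = np.load(outfile)
    assert sorted(npz.files) == ['a', 'b']
    assert 'a' in npz                       # __contains__
    assert npz['a'].shape == (3,)           # getitem access
    assert npz.f.b.shape == (2, 2)          # attribute access via BagObj
    assert dict(npz.items())['a'][0] == 0   # dict-style items()
    npz.close()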
def load(file, mmap_mode=None):
"""
Load a pickled, ``.npy``, or ``.npz`` binary file.
Parameters
----------
file : file-like object or string
The file to read. It must support ``seek()`` and ``read()`` methods.
If the filename extension is ``.gz``, the file is first decompressed.
    mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode
(see `numpy.memmap`). The mode has no effect for pickled or
zipped files.
A memory-mapped array is stored on disk, and not directly loaded
into memory. However, it can be accessed and sliced like any
ndarray. Memory mapping is especially useful for accessing
small fragments of large files without reading the entire file
into memory.
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file.
Raises
------
IOError
If the input file does not exist or cannot be read.
See Also
--------
save, savez, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever is stored in the
pickle is returned.
- If the file is a ``.npy`` file, then an array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif isinstance(file, gzip.GzipFile):
fid = seek_gzip_factory(file)
own_fid = True
else:
fid = file
try:
        # Code to distinguish between NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX): # zip-file (assume .npz)
own_fid = False
return NpzFile(fid, own_fid=True)
elif magic == format.MAGIC_PREFIX: # .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid)
else: # Try a pickle
try:
return _cload(fid)
except:
raise IOError, \
"Failed to interpret file %s as a pickle" % repr(file)
finally:
if own_fid:
fid.close()
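# A minimal sketch of load()'s dispatch on the leading bytes: the zip prefix
# 'PK\x03\x04' yields an NpzFile, the .npy magic yields a plain array, and
# anything else falls through to the pickle reader.
def _example_load_dispatch():
    import numpy as np
    from tempfile import TemporaryFile
    npy, npz = TemporaryFile(), TemporaryFile()
    np.save(npy, np.arange(4))
    np.savez(npz, x=np.arange(4))
    npy.seek(0)
    npz.seek(0)
    assert isinstance(np.load(npy), np.ndarray)
    assert hasattr(np.load(npz), 'files')   # dictionary-like NpzFile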
def save(file, arr):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see `format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the .npz file, are 'arr_0', 'arr_1', etc. If keyword arguments
are given, the corresponding variable names, in the ``.npz`` file will
match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
*args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
**kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
    savetxt : Save an array to a file as plain text.
    savez_compressed : Save several arrays into a compressed ``.npz`` archive.
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with *args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with **kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
    file : str
        File name of the ``.npz`` file.
    *args : Arguments, optional
        Arrays to save to the file. Since their names are not known to
        `savez_compressed`, they are stored as "arr_0", "arr_1", and so on.
    **kwds : Keyword arguments, optional
        Arrays to save to the file. Arrays will be saved in the file with the
        keyword names.
See Also
--------
numpy.savez : Save several arrays into an uncompressed .npz file format
"""
_savez(file, args, kwds, True)
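# A minimal usage sketch of savez_compressed: the archive layout is the same
# as savez (keyword names or arr_0, arr_1, ...), only DEFLATE-compressed.
def _example_savez_compressed():
    import numpy as np
    from tempfile import TemporaryFile
    outfile = TemporaryFile()
    np.savez_compressed(outfile, x=np.arange(100), y=np.ones(100))
    outfile.seek(0)
    npz = np.load(outfile)
    assert sorted(npz.files) == ['x', 'y']
    assert (npz['y'] == 1.0).all()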
def _savez(file, args, kwds, compress):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError, "Cannot use un-named variables and keyword %s" % key
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zip = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.iteritems():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val))
fid.close()
fid = None
zip.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zip.close()
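# A minimal sketch of the naming rules enforced by _savez: positional arrays
# are stored as 'arr_0', 'arr_1', ... and a keyword that collides with one of
# those generated names is rejected with a ValueError.
def _example_savez_naming():
    import numpy as np
    from tempfile import TemporaryFile
    outfile = TemporaryFile()
    np.savez(outfile, np.arange(3), extra=np.zeros(2))   # arr_0 plus 'extra'
    outfile.seek(0)
    assert sorted(np.load(outfile).files) == ['arr_0', 'extra']
    try:
        np.savez(TemporaryFile(), np.arange(3), arr_0=np.zeros(2))
    except ValueError:
        pass                                             # collision rejected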
# Adapted from matplotlib
def _getconv(dtype):
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
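# A minimal sketch of the converters returned by _getconv: most integer
# columns go through float() first, so text such as '3.0' still parses
# (64-bit integer dtypes are handed to np.int64/np.uint64 directly).
def _example_getconv():
    import numpy as np
    assert _getconv(np.dtype(np.int32))('3.0') == 3      # int(float(x)) path
    assert _getconv(np.dtype(bool))('1') is True
    assert _getconv(np.dtype(float))('2.5') == 2.5
    assert _getconv(np.dtype(complex))('1+2j') == 1 + 2j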
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
record data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str, optional
The character used to indicate the start of a comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a record
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
comments = asbytes(comments)
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
fh = iter(seek_gzip_factory(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
else:
fh = iter(open(fname, 'U'))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], tuple)]
if len(shape) > 1:
for dim in dt.shape[-2:0:-1]:
packing = [(dim*packing[0][0],packing*dim)]
packing = packing*shape[0]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
packing.append((len(flat_dt),flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
        if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
line = asbytes(line).split(comments)[0].strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in xrange(skiprows):
fh.next()
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = fh.next()
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in xrange(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).iteritems():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if not ndmin in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
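# A minimal sketch of two loadtxt options not covered by the docstring
# examples: a per-column converter supplying a default for blank fields, and
# ndmin keeping a single-row file two-dimensional.
def _example_loadtxt_converters():
    import numpy as np
    from StringIO import StringIO
    c = StringIO("1, ,3")
    x = np.loadtxt(c, delimiter=',',
                   converters={1: lambda s: float(s.strip() or -1)})
    assert x.tolist() == [1.0, -1.0, 3.0]
    d = StringIO("4 5 6")
    y = np.loadtxt(d, ndmin=2)
    assert y.shape == (1, 3)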
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n'):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored.
delimiter : str
Character separating columns.
newline : str
.. versionadded:: 1.5.0
Character separating lines.
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
    savez : Save several arrays into an uncompressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
        ``+`` : Forces the result to be preceded with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
# `fmt` can be a string with multiple insertion points or a
        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif type(fmt) is str:
if fmt.count('%') == 1:
fmt = [fmt, ]*ncol
format = delimiter.join(fmt)
elif fmt.count('%') != ncol:
raise AttributeError('fmt has wrong number of %% formats. %s'
% fmt)
else:
format = fmt
for row in X:
fh.write(asbytes(format % tuple(row) + newline))
finally:
if own_fh:
fh.close()
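# A minimal sketch of the fmt handling in savetxt: a sequence of per-column
# formats is joined with the delimiter, whereas a single multi-'%' string is
# used verbatim and the delimiter is ignored.
def _example_savetxt_fmt():
    import numpy as np
    from StringIO import StringIO
    buf = StringIO()
    data = np.array([[1, 0.5], [2, 0.25]])
    np.savetxt(buf, data, fmt=['%d', '%.2f'], delimiter=',')
    assert buf.getvalue() == '1,0.50\n2,0.25\n'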
import re
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
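# A minimal sketch of the single-group branch of fromregex: with one group
# the matches come back as plain strings, so the array is first built with
# the lone field's dtype and then re-viewed as a structured array.
def _example_fromregex_single_group():
    import numpy as np
    from StringIO import StringIO
    buf = StringIO("x=10\nx=20\nx=30\n")
    out = np.fromregex(buf, r"x=(\d+)", [('val', np.int64)])
    assert out['val'].tolist() == [10, 20, 30]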
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skiprows=0, skip_header=0, skip_footer=0, converters=None,
missing='', missing_values=None, filling_values=None,
usecols=None, names=None,
excludelist=None, deletechars=None, replace_space='_',
autostrip=False, case_sensitive=True, defaultfmt="f%i",
unpack=None, usemask=False, loose=True, invalid_raise=True):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
`.gz` or `.bz2`, the file is first decompressed. Note that
generators must return byte strings in Python 3k.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded.
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skip_header : int, optional
        The number of lines to skip at the beginning of the file.
    skip_footer : int, optional
        The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names have an underscore appended:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables names.
By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
Examples
    --------
>>> from StringIO import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
# Py3 data conversions to bytes, for convenience
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing, unicode):
missing = asbytes(missing)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
errmsg = "The input argument 'converter' should be a valid dictionary "\
"(got '%s' instead)"
raise TypeError(errmsg % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError("fname mustbe a string, filehandle, or generator. "\
"(got %s instead)" % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Get the first valid lines after the first skiprows ones ..
if skiprows:
warnings.warn(\
"The use of `skiprows` is deprecated, it will be removed in numpy 2.0.\n" \
"Please use `skip_header` instead.",
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
for i in xrange(skip_header):
fhd.next()
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = fhd.next()
if names is True:
if comments in first_line:
first_line = asbytes('').join(first_line.split(comments)[1:])
first_values = split_line(first_line)
except StopIteration:
# might want to return empty array instead of raising error.
raise IOError('End-of-file reached before encountering data.')
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the deprecated `missing`
if missing != asbytes(''):
warnings.warn(\
"The use of `missing` is deprecated, it will be removed in Numpy 2.0.\n" \
"Please use `missing_values` instead.",
DeprecationWarning)
values = [str(_) for _ in missing.split(asbytes(","))]
for entry in missing_values:
entry.extend(values)
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values or []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
        # Note: we can't use a [...]*nbcols, as that would give nbcols
        # references to the same converter, instead of nbcols independent ones.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (i, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(i):
try:
i = names.index(i)
except ValueError:
continue
elif usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
# Find the value to test:
if len(first_line):
testing_value = first_values[i]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
# Select only the columns we need
if usecols:
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values, missing_values)]))
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = map(itemgetter(i), rows)
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = itertools.imap(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
            errmsg.insert(0, "Some errors were detected!")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
# if loose:
# conversionfuncs = [conv._loose_call for conv in converters]
# else:
# conversionfuncs = [conv._strict_call for conv in converters]
# for (i, vals) in enumerate(rows):
# rows[i] = tuple([convert(val)
# for (convert, val) in zip(conversionfuncs, vals)])
if loose:
rows = zip(*[map(converter._loose_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
else:
rows = zip(*[map(converter._strict_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = zip(names, column_types)
mdtype = zip(names, [np.bool] * len(column_types))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
errmsg = "Nested fields involving objects "\
"are not supported..."
raise NotImplementedError(errmsg)
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(masks,
dtype=np.dtype([('', np.bool)
for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for (i, ttype) in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
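# A minimal sketch of the missing-data machinery in genfromtxt:
# missing_values marks the tokens to treat as absent, filling_values supplies
# their replacement, and usemask=True returns the corresponding mask.
def _example_genfromtxt_missing():
    import numpy as np
    from StringIO import StringIO
    s = StringIO("1,N/A,3\n4,5,N/A")
    data = np.genfromtxt(s, delimiter=',',
                         missing_values='N/A', filling_values=-999)
    assert data.tolist() == [[1.0, -999.0, 3.0], [4.0, 5.0, -999.0]]
    s.seek(0)
    masked = np.genfromtxt(s, delimiter=',',
                           missing_values='N/A', usemask=True)
    assert masked.mask.tolist() == [[False, True, False],
                                    [False, False, True]]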
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.update(dtype=kwargs.get('dtype', None))
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
names = kwargs.get('names', True)
if names is None:
names = True
    # Pass through any user-supplied dtype; None lets genfromtxt infer it.
    kwargs.update(dtype=kwargs.get('dtype', None),
delimiter=kwargs.get('delimiter', ",") or ",",
names=names,
case_sensitive=case_sensitive)
usemask = kwargs.get("usemask", False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
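# A minimal usage sketch of recfromcsv: it wires genfromtxt up for
# comma-separated text with a header row and (by default) lower-cased field
# names, and returns a record array so columns read as attributes.
def _example_recfromcsv():
    import numpy as np
    from StringIO import StringIO
    s = StringIO("Name,Value\na,1\nb,2")
    rec = np.recfromcsv(s)
    assert rec.dtype.names == ('name', 'value')
    assert rec.value.tolist() == [1, 2]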
| apache-2.0 |
legacysurvey/pipeline | py/legacyanalysis/check-psf.py | 2 | 26103 | import matplotlib
matplotlib.use('Agg')
import pylab as plt
import numpy as np
import sys
import os
import fitsio
from astrometry.util.fits import fits_table
from astrometry.util.plotutils import PlotSequence, dimshow
from astrometry.libkd.spherematch import match_radec
from astrometry.util.util import Tan
from tractor import *
from legacypipe.survey import *
def subplot_grid(ny, nx, i):
    # Map row-major index i (with row 0 at the bottom) onto matplotlib's
    # 1-based, top-down subplot numbering.
    y = i/nx
    x = i%nx
    plt.subplot(ny, nx, 1 + ((ny-1)-y)*nx + x)
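# A worked check of the index arithmetic above (plain Python only, so it does
# not touch matplotlib state): in a 3x2 grid, row-major index 0 -- the
# bottom-left panel -- lands in matplotlib's slot 5.
def _example_subplot_grid_slots(ny=3, nx=2):
    slots = []
    for i in range(ny * nx):
        y, x = i // nx, i % nx
        slots.append(1 + ((ny - 1) - y) * nx + x)
    assert slots == [5, 6, 3, 4, 1, 2]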
if __name__ == '__main__':
survey = LegacySurveyData()
ps = PlotSequence('psf')
#B = survey.get_bricks()
T = survey.get_ccds()
T.cut(T.extname == 'S1')
print 'Cut to', len(T)
#print 'Expnums:', T[:10]
T.cut(T.expnum == 348233)
print 'Cut to', len(T)
T.about()
band = T.filter[0]
print 'Band:', band
im = DecamImage(survey, T[0])
print 'Reading', im.imgfn
# Get approximate image center for astrometry
hdr = im.read_image_header()
wcs = Tan(hdr['CRVAL1'], hdr['CRVAL2'], hdr['CRPIX1'], hdr['CRPIX2'],
hdr['CD1_1'], hdr['CD1_2'], hdr['CD2_1'], hdr['CD2_2'],
hdr['NAXIS1'], hdr['NAXIS2'])
print 'WCS:', wcs
r,d = wcs.pixelxy2radec(wcs.imagew/2, wcs.imageh/2)
pixscale = wcs.pixel_scale()
run_calibs(im, r, d, pixscale, astrom=True, morph=False, se2=False)
iminfo = im.get_image_info()
print 'img:', iminfo
H,W = iminfo['dims']
#psfex = PsfEx(im.psffn, W, H, nx=6)
#psfex = PsfEx(im.psffn, W, H, ny=13, nx=7)
#psfex = PsfEx(im.psffn, W, H, ny=9, nx=5)
#psfex = PsfEx(im.psffn, W, H, ny=17, nx=9,
# psfClass=GaussianMixtureEllipsePSF)
psfex = PsfEx(im.psffn, W, H, ny=13, nx=7,
psfClass=GaussianMixtureEllipsePSF)
fn = 'psfex-ellipses.fits'
if os.path.exists(fn):
plt.figure(figsize=(5,10))
plt.subplots_adjust(left=0.01, bottom=0.01, top=0.95, right=0.99,
wspace=0.05, hspace=0.025)
pp = fitsio.read(fn)
print 'Read parameters:', pp.shape
ny,nx,nparams = pp.shape
print 'nx,ny', nx,ny
print 'nparams', nparams
psfex.ny = ny
psfex.nx = nx
YY = np.linspace(0, psfex.H, psfex.ny)
XX = np.linspace(0, psfex.W, psfex.nx)
#psfex.splinedata = (pp, XX, YY)
# Convert to GaussianMixturePSF
ppvar = np.zeros_like(pp)
for iy in range(ny):
for ix in range(nx):
psf = GaussianMixtureEllipsePSF(*pp[iy, ix, :])
mog = psf.toMog()
ppvar[iy,ix,:] = mog.getParams()
psfexvar = PsfEx(im.psffn, W, H, ny=psfex.ny, nx=psfex.nx,
psfClass=GaussianMixturePSF)
psfexvar.splinedata = (ppvar, XX, YY)
#psfexvar.fitSavedData(ppvar, XX, YY)
#fitsio.write('psfex-variances.fits', ppvar, clobber=True)
psfexvar.toFits('psfex-var-2.fits')
psfexvar2 = PsfEx.fromFits('psfex-var-2.fits')
sys.exit(0)
psfex.fitSavedData(pp, XX, YY)
subpp = pp[::2, ::2, :]
subXX, subYY = XX[::2], YY[::2]
print 'subpp:', subpp.shape
subpsfex = PsfEx(im.psffn, W, H, ny=len(subYY), nx=len(subXX),
psfClass=GaussianMixtureEllipsePSF)
subpsfex.fitSavedData(subpp, subXX, subYY)
ppvar = np.zeros_like(pp)
for iy in range(ny):
for ix in range(nx):
psf = GaussianMixtureEllipsePSF(*pp[iy, ix, :])
#print 'PSF:', psf
mog = psf.toMog()
#print 'MoG:', mog
#print 'Params:', mog.getParams()
ppvar[iy,ix,:] = mog.getParams()
psfexvar = PsfEx(im.psffn, W, H, ny=17, nx=9,
psfClass=GaussianMixturePSF)
#psfexvar = VaryingGaussianPSF(W, H, ny=17, nx=9)
psfexvar.fitSavedData(ppvar, XX, YY)
subvar = ppvar[::2, ::2, :]
subpsfexvar = PsfEx(im.psffn, W, H, ny=len(subYY), nx=len(subXX),
psfClass=GaussianMixturePSF)
subpsfexvar.fitSavedData(subvar, subXX, subYY)
psfgrid = []
psfcropgrid = []
crop = 10
for y in YY:
for x in XX:
psfimg = psfex.instantiateAt(x, y)
psfgrid.append(psfimg)
h,w = psfimg.shape
img = psfimg[h/2-crop:h/2+crop+1, w/2-crop:w/2+crop+1]
psfcropgrid.append(img)
mx = np.max([psfimg.max() for psfimg in psfgrid])
logmx = np.log10(mx)
plt.clf()
for i,psfimg in enumerate(psfcropgrid):
#plt.subplot(len(YY), len(XX), i+1)
subplot_grid(len(YY), len(XX), i)
dimshow(np.log10(np.maximum(psfimg, mx*1e-16)),
vmax=logmx, vmin=logmx-4, ticks=False, cmap='jet')
plt.suptitle('PsfEx models')
ps.savefig()
modnames = ['Dense-grid MoG', 'Coarse-grid MoG',
'Dense-grid MoG (variance)', 'Coarse-grid MoG (variance)']
models = [ psfex, subpsfex, psfexvar, subpsfexvar ]
modgrids = [[] for m in models]
for iy,y in enumerate(YY):
for ix,x in enumerate(XX):
for model,grid in zip(models, modgrids):
psf = model.psfAt(x, y)
mod = psf.getPointSourcePatch(0., 0., radius=crop)
assert(mod.shape == psfcropgrid[0].shape)
grid.append(mod.patch)
for name,modgrid in zip(modnames, modgrids):
plt.clf()
for i,psfimg in enumerate(modgrid):
subplot_grid(len(YY), len(XX), i)
dimshow(np.log10(np.maximum(psfimg, mx*1e-16)),
vmax=logmx, vmin=logmx-4, ticks=False, cmap='jet')
plt.suptitle(name)
ps.savefig()
for name,modgrid in zip(modnames, modgrids):
plt.clf()
for i,(psfimg,modimg) in enumerate(zip(psfcropgrid,modgrid)):
subplot_grid(len(YY), len(XX), i)
diff = psfimg - modimg
#print 'Max diff:', np.abs(diff).max()
dimshow(psfimg - modimg, vmin=-0.001, vmax=0.001,
ticks=False, cmap='RdBu')
plt.suptitle('PsfEx - %s' % name)
ps.savefig()
sys.exit(0)
# for ibase,basis in enumerate(psfex.psfbases):
# plt.clf()
# plt.subplot(2,1,1)
# mx = np.percentile(np.abs(basis), 99.5)
# if ibase == 0:
# dimshow(basis, ticks=False)
# else:
# dimshow(basis, ticks=False, vmin=-mx, vmax=mx)
# #plt.colorbar(fraction=0.2)
# plt.suptitle('PsfEx eigen-PSF %i' % ibase)
# plt.subplot(2,1,2)
#
# xx,yy = np.meshgrid(np.linspace(0, W, 50), np.linspace(0, H, 50))
# print 'xx,yy', xx.shape, yy.shape
# amp = np.zeros_like(xx)
# print 'amp', amp.shape
#
# dx = (xx - psfex.x0) / psfex.xscale
# dy = (yy - psfex.y0) / psfex.yscale
# for d in range(psfex.degree + 1):
# print 'degree', d
# for j in range(d+1):
# k = d - j
# print 'j', j, 'k', k
# # PSFEx manual pg. 111 ?
# ii = j + (psfex.degree+1) * k - (k * (k-1))/ 2
# print 'ii', ii
# if ii != ibase:
# continue
# amp += dx**j * dy**k
# dimshow(amp, extent=[xx[0,0],xx[0,-1],yy[0,0],yy[-1,0]])
# plt.colorbar(fraction=0.25)
# ps.savefig()
plt.subplots_adjust(left=0.02, right=0.98, bottom=0.02, top=0.92,
wspace=0.1, hspace=0.2)
nbases = len(psfex.psfbases)
cols = int(np.ceil(np.sqrt(nbases) * 1.3))
rows = int(np.ceil(nbases / float(cols)))
plt.clf()
for ibase,basis in enumerate(psfex.psfbases):
plt.subplot(rows, cols, ibase+1)
mx = np.percentile(np.abs(basis), 99.5)
if ibase == 0:
dimshow(basis, ticks=False)
else:
dimshow(basis, ticks=False, vmin=-mx, vmax=mx)
for d in range(psfex.degree + 1):
print 'degree', d
for j in range(d+1):
k = d - j
print 'j', j, 'k', k
# PSFEx manual pg. 111 ?
ii = j + (psfex.degree+1) * k - (k * (k-1))/ 2
print 'ii', ii
if ii == ibase:
xoyo = (j,k)
plt.title('$x^%i y^%i$' % xoyo, fontdict=dict(fontsize=10))
plt.suptitle('PsfEx eigen-PSFs')
ps.savefig()
plt.figure(figsize=(5,10))
plt.subplots_adjust(left=0.01, bottom=0.01, top=0.95, right=0.99,
wspace=0.05, hspace=0.025)
S = im.read_sdss()
print len(S), 'SDSS sources'
S.cut(S.objc_type == 6)
print len(S), 'SDSS stars'
S.flux = S.get('%s_psfflux' % band)
wcs = im.read_wcs()
img = im.read_image()
sky = np.median(img)
img -= sky
invvar = im.read_invvar(clip=True)
sig1 = 1./np.sqrt(np.median(invvar[invvar > 0]))
sigoff = 3
img += sig1 * sigoff
# convert to sigmas
img /= sig1
invvar *= sig1**2
H,W = img.shape
sz = 22
# Read sources detected in DECam image too
T = fits_table(im.sefn, hdu=2)
print 'Got', len(T), 'DECam sources'
T.about()
T.ra,T.dec = wcs.pixelxy2radec(T.x_image, T.y_image)
I,J,d = match_radec(S.ra, S.dec, T.ra, T.dec, 1./3600., nearest=True)
print 'Matched', len(I)
# Replace SDSS RA,Dec by DECam RA,Dec
S.cut(I)
S.ra = T.ra [J]
S.dec = T.dec[J]
ok,S.x,S.y = wcs.radec2pixelxy(S.ra, S.dec)
S.x -= 1
S.y -= 1
S.ix, S.iy = np.round(S.x).astype(int), np.round(S.y).astype(int)
S.cut((S.ix >= sz) * (S.iy >= sz) * (S.ix < W-sz) * (S.iy < H-sz))
print len(S), 'SDSS stars in bounds'
S.cut(invvar[S.iy, S.ix] > 0)
print len(S), 'SDSS stars not in masked regions'
S.cut(np.argsort(-S.flux))
rows,cols = 4,5
#rows,cols = 2,2
subimgs = []
for i in range(rows*cols):
s = S[i]
subimg = img[s.iy - sz : s.iy + sz+1, s.ix - sz : s.ix + sz+1]
subimgs.append(subimg)
# plt.clf()
# for i,subimg in enumerate(subimgs):
# plt.subplot(rows, cols, 1+i)
# dimshow(subimg, ticks=False)
# #plt.colorbar()
# ps.savefig()
maxes = []
# plt.clf()
for i,subimg in enumerate(subimgs):
# plt.subplot(rows, cols, 1+i)
mx = subimg.max()
maxes.append(mx)
# logmx = np.log10(mx)
# dimshow(np.log10(np.maximum(subimg, mx*1e-16)), vmin=0, vmax=logmx,
# ticks=False)
# ps.savefig()
origpsfimgs = []
unitpsfimgs = []
psfimgs = []
YY = np.linspace(0, psfex.H, psfex.ny)
XX = np.linspace(0, psfex.W, psfex.nx)
psfgrid = []
psfcropgrid = []
crop = 10
for y in YY:
for x in XX:
psfimg = psfex.instantiateAt(x, y)
psfgrid.append(psfimg)
h,w = psfimg.shape
img = psfimg[h/2-crop:h/2+crop+1, w/2-crop:w/2+crop+1]
psfcropgrid.append(img)
mx = np.max([psfimg.max() for psfimg in psfgrid])
logmx = np.log10(mx)
plt.clf()
for i,psfimg in enumerate(psfcropgrid):
#plt.subplot(len(YY), len(XX), i+1)
subplot_grid(len(YY), len(XX), i)
dimshow(np.log10(np.maximum(psfimg, mx*1e-16)),
vmax=logmx, vmin=logmx-4, ticks=False, cmap='jet')
plt.suptitle('PsfEx models')
ps.savefig()
psfex.savesplinedata = True
# psfex.ensureFit()
psfex._fitParamGrid(damp=1)
pp,XX,YY = psfex.splinedata
# Convert to GaussianMixturePSF
ppvar = np.zeros_like(pp)
for iy in range(ny):
for ix in range(nx):
psf = GaussianMixtureEllipsePSF(*pp[iy, ix, :])
mog = psf.toMog()
ppvar[iy,ix,:] = mog.getParams()
psfexvar = PsfEx(im.psffn, W, H, ny=psfex.ny, nx=psfex.nx,
psfClass=GaussianMixturePSF)
#psfexvar.fitSavedData(ppvar, XX, YY)
fitsio.write('psfex-variances.fits', ppvar, clobber=True)
modgrid = []
for iy,y in enumerate(YY):
for ix,x in enumerate(XX):
psf = psfex.psfAt(x,y)
psf.radius = crop
modimg = psf.getPointSourcePatch(0., 0.)
modgrid.append(modimg.patch)
# pp = []
# px0 = None
# for iy,y in enumerate(YY):
# pprow = []
# p0 = px0
# for ix,x in enumerate(XX):
# psfimg = psfex.instantiateAt(x, y)
# h,w = psfimg.shape
# cropped = psfimg[h/2-crop:h/2+crop+1, w/2-crop:w/2+crop+1]
#
# epsf = GaussianMixtureEllipsePSF.fromStamp(psfimg, P0=p0, damp=1.)
# p0 = epsf.getParams()
# if ix == 0:
# px0 = p0
#
# epsf.radius = crop
# modimg = epsf.getPointSourcePatch(0., 0.)
# modgrid.append(modimg.patch)
#
# if iy == 0 and False:
# plt.clf()
# plt.subplot(3,1,1)
# dimshow(np.log10(np.maximum(cropped, mx*1e-16)),
# vmax=logmx, vmin=logmx-4, ticks=False, cmap='jet')
# plt.subplot(3,1,2)
# dimshow(np.log10(np.maximum(modimg.patch, mx*1e-16)),
# vmax=logmx, vmin=logmx-4, ticks=False, cmap='jet')
# ax = plt.axis()
# angle = np.linspace(0., 2.*np.pi, 20)
# xx,yy = np.sin(angle), np.cos(angle)
# xy = np.vstack((xx,yy))
# for i,ell in enumerate(epsf.ellipses):
# mx,my = epsf.mog.mean[i,:]
# T = ell.getRaDecBasis()
# T *= 3600.
# txy = np.dot(T, xy)
# plt.plot(crop + mx + txy[0,:],
# crop + my + txy[1,:], 'k-', lw=1.5)
# plt.axis(ax)
# plt.subplot(3,1,3)
# dimshow(cropped - modimg.patch,
# vmin=-0.001, vmax=0.001, ticks=False, cmap='RdBu')
# ps.savefig()
#
# print 'Fit PSF:', epsf
# #print 'Params:', epsf.getAllParams()
# #repsf = GaussianMixtureEllipsePSF(*epsf.getAllParams())
# #print 'Reconstructed:', repsf
#
# params = np.array(epsf.getAllParams())
# pprow.append(params)
# pp.append(pprow)
# pp = np.array(pp)
# print 'pp', pp.shape
fitsio.write('psfex-ellipses.fits', pp, clobber=True)
plt.clf()
for i,modimg in enumerate(modgrid):
subplot_grid(len(YY), len(XX), i)
#plt.subplot(len(YY), len(XX), i+1)
dimshow(np.log10(np.maximum(modimg, mx*1e-16)),
vmax=logmx, vmin=logmx-4, ticks=False, cmap='jet')
plt.suptitle('PsfEx: Mixture-of-Gaussian fits')
ps.savefig()
plt.clf()
for i,(psfimg,modimg) in enumerate(zip(psfcropgrid,modgrid)):
subplot_grid(len(YY), len(XX), i)
#plt.subplot(len(YY), len(XX), i+1)
diff = psfimg - modimg
print 'Max diff:', np.abs(diff).max()
dimshow(psfimg - modimg, vmin=-0.001, vmax=0.001,
ticks=False, cmap='RdBu')
plt.suptitle('PsfEx: Pixelized - Mixture-of-Gaussian')
ps.savefig()
ny,nx,nparams = pp.shape
names = psf.getParamNames()
plt.figure(figsize=(10,10))
#iii = [ [i + j for j in [0,3,4,9,10,11]] for i in [0,1,2] ]
iii = [ [0, 3,4, 9,10,11],
[1, 5,6, 12,13,14],
[2, 7,8, 15,16,17] ]
for ii in iii:
plt.clf()
for j,ip in enumerate(ii):
plt.subplot(2,3, j+1)
print 'Param', names[ip]
print pp[:,:,ip]
# param values from other components
kpp = np.hstack([pp[:,:,kii[j]].ravel() for kii in iii])
mn,mx = kpp.min(), kpp.max()
dimshow(pp[:,:,ip], cmap='jet', ticks=False, vmin=mn, vmax=mx)
plt.colorbar()
plt.title(names[ip])
plt.suptitle('Mixture of Gaussian models: spatial variation of parameters')
ps.savefig()
# for ip in range(nparams):
# plt.clf()
# dimshow(pp[:,:,ip], ticks=False)
# plt.colorbar()
# plt.title(names[ip])
# ps.savefig()
sys.exit(0)
psfex.savesplinedata = True
print 'Fitting PsfEx model...'
psfex.ensureFit()
modgrid = []
for y in YY:
for x in XX:
mog = psfex.psfAt(x, y)
mog.radius = crop
modimg = mog.getPointSourcePatch(0., 0.)
print 'Patch shape', modimg.shape
modgrid.append(modimg.patch)
plt.clf()
for i,modimg in enumerate(modgrid):
plt.subplot(len(YY), len(XX), i+1)
#h,w = psfimg.shape
#img = psfimg[h/2-crop:h/2+crop, w/2-crop:w/2+crop]
dimshow(np.log10(np.maximum(modimg, mx*1e-16)),
vmax=logmx, vmin=logmx-4, ticks=False, cmap='jet')
plt.suptitle('PsfEx: Mixture-of-Gaussian fits')
ps.savefig()
plt.clf()
for i,(psfimg,modimg) in enumerate(zip(psfcropgrid,modgrid)):
plt.subplot(len(YY), len(XX), i+1)
diff = psfimg - modimg
print 'Max diff:', np.abs(diff).max()
dimshow(psfimg - modimg, vmin=-0.01, vmax=0.01,
ticks=False, cmap='jet')
plt.suptitle('PsfEx: Pixelized - Mixture-of-Gaussian')
ps.savefig()
sys.exit(0)
plt.clf()
for i,subimg in enumerate(subimgs):
s = S[i]
plt.subplot(rows, cols, 1+i)
# Sum the flux near the core...
ss = 5
flux = np.sum(img[s.iy-ss:s.iy+ss+1, s.ix-ss:s.ix+ss+1])
# subtract off the 3*sig we added
flux -= (2*ss+1)**2 * sigoff
psfimg = psfex.instantiateAt(s.x, s.y)
origpsfimgs.append(psfimg)
if True:
from astrometry.util.util import lanczos3_interpolate
dx,dy = s.x - s.ix, s.y - s.iy
#print 'dx,dy', dx,dy
ph,pw = psfimg.shape
ix,iy = np.meshgrid(np.arange(pw), np.arange(ph))
ix = ix.ravel().astype(np.int32)
iy = iy.ravel().astype(np.int32)
nn = len(ix)
laccs = [np.zeros(nn, np.float32)]
rtn = lanczos3_interpolate(ix, iy,
-dx+np.zeros(len(ix),np.float32),
-dy+np.zeros(len(ix),np.float32),
laccs, [psfimg.astype(np.float32)])
psfimg = laccs[0].reshape(psfimg.shape)
unitpsfimgs.append(psfimg)
psfimg = psfimg * flux + sigoff
psfimgs.append(psfimg)
mx = maxes[i]
#mx = psfimg.max()
logmx = np.log10(mx)
dimshow(np.log10(np.maximum(psfimg, mx*1e-16)), vmin=0, vmax=logmx,
ticks=False)
ps.savefig()
# plt.clf()
# for i,(subimg,psfimg) in enumerate(zip(subimgs, psfimgs)):
# plt.subplot(rows, cols, 1+i)
# dimshow(subimg - psfimg, ticks=False)
# ps.savefig()
plt.clf()
for i,(subimg,psfimg) in enumerate(zip(subimgs, psfimgs)):
# Re-scale psfimgs
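    # Least-squares fit of subimg ~= x[0] + x[1] * psfimg, so x[0] absorbs
    # the residual sky level and x[1] the flux scaling of the PSF stamp.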
h,w = subimg.shape
A = np.zeros((h*w, 2))
A[:,0] = 1.
A[:,1] = psfimg.ravel()
b = subimg.ravel()
x,resid,rank,s = np.linalg.lstsq(A, b)
#print 'x', x
#psfimg = x[0] + psfimg * x[1]
psfimg *= x[1]
psfimg += x[0]
res = subimg-psfimg
#print 'range', res.min(), res.max()
plt.subplot(rows, cols, 1+i)
dimshow(subimg - psfimg, ticks=False, vmin=-5, vmax=5)
plt.suptitle('Image - PixPSF')
ps.savefig()
# Re-fit the centers
srcs = []
tims = []
plt.clf()
for i,(subimg,upsfimg,opsfimg) in enumerate(zip(
subimgs, unitpsfimgs, origpsfimgs)):
s = S[i]
print
print
A = np.zeros((h*w, 2))
A[:,0] = 1.
A[:,1] = upsfimg.ravel()
b = subimg.ravel()
x,resid,rank,nil = np.linalg.lstsq(A, b)
sky = x[0]
flux = x[1]
print 'Flux', flux
print 'Sky', sky
tim = Image(data=subimg, invvar=np.ones_like(subimg),
psf=PixelizedPSF(opsfimg), sky=ConstantSky(sky))
tim.modelMinval = 1e-8
tim.getWcs().pixscale = 3600.
h,w = tim.shape
src = PointSource(PixPos(w/2 + (s.x-s.ix), h/2 + (s.y-s.iy)),
Flux(flux))
tr = Tractor([tim],[src])
tr.freezeParam('images')
print 'src', src
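    # take up to 20 optimization steps, stopping once the improvement in
    # log-probability (dlnp) becomes negligible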
for step in range(20):
dlnp,X,alpha = tr.optimize(shared_params=False)
print 'dlnp', dlnp, 'src', src
if dlnp < 1e-6:
break
srcs.append(src)
tims.append(tim)
plt.subplot(rows, cols, 1+i)
dimshow(subimg - tr.getModelImage(0), ticks=False, vmin=-5, vmax=5)
plt.suptitle('Image - PixPSF model')
ps.savefig()
# Fit MoG model to each one
mogs = []
plt.clf()
for i,(subimg,psfimg,tim,src) in enumerate(zip(
subimgs, origpsfimgs, tims, srcs)):
s = S[i]
print 'PSF image sum', psfimg.sum()
#mog = psfex.psfAt(s.x, s.y)
mog = GaussianMixturePSF.fromStamp(psfimg)
mogs.append(mog)
tim.opsf = tim.psf
tim.psf = mog
tr = Tractor([tim],[src])
plt.subplot(rows, cols, 1+i)
dimshow(subimg - tr.getModelImage(0), ticks=False, vmin=-5, vmax=5)
plt.suptitle('Image - MoG model')
ps.savefig()
plt.clf()
for i,(mog,psfimg) in enumerate(zip(mogs, origpsfimgs)):
ph,pw = psfimg.shape
sz = pw/2
#mog.radius = sz
mogimg = mog.getPointSourcePatch(0., 0., radius=sz)
#, extent=[-sz,sz,-sz,sz])
mogimg = mogimg.patch
mx = 0.002
plt.subplot(rows, cols, 1+i)
dimshow(psfimg - mogimg, ticks=False, vmin=-mx, vmax=mx)
#plt.colorbar()
plt.suptitle('PixPSF - MoG')
ps.savefig()
orig_mogs = [mog.copy() for mog in mogs]
# Re-fit the MoG PSF to the pixelized postage stamp
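# Each pixelized stamp is treated as a tiny Image with a unit-flux point
# source fixed at its center; with the catalog frozen and everything but the
# PSF frozen, the optimizer adjusts only the MoG parameters to match the stamp.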
plt.clf()
for i,(mog,psfimg) in enumerate(zip(mogs, origpsfimgs)):
print
print
tim = Image(data=psfimg, invvar=1e6*np.ones_like(psfimg),
psf=mog)
h,w = psfimg.shape
src = PointSource(PixPos(w/2,h/2), Flux(1.))
tr = Tractor([tim], [src])
tr.freezeParam('catalog')
tim.freezeAllBut('psf')
print 'MoG', mog
for step in range(20):
dlnp,X,alpha = tr.optimize(shared_params=False)
print 'dlnp', dlnp, 'mog', mog
if dlnp < 1e-6:
break
sz = w/2
mogimg = mog.getPointSourcePatch(0., 0., radius=sz)
mogimg = mogimg.patch
mx = 0.002
plt.subplot(rows, cols, 1+i)
dimshow(psfimg - mogimg, ticks=False, vmin=-mx, vmax=mx)
plt.suptitle('PixPSF - MoG')
ps.savefig()
# Image - MoG model
plt.clf()
for i,(subimg,tim,src) in enumerate(zip(
subimgs, tims, srcs)):
tr = Tractor([tim],[src])
plt.subplot(rows, cols, 1+i)
dimshow(subimg - tr.getModelImage(0), ticks=False, vmin=-5, vmax=5)
plt.suptitle('Image - MoG model')
ps.savefig()
##############
# Re-fit the MoGs using EllipseESoft basis.
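# The covariances of each fitted mixture are converted to EllipseESoft
# parameters (presumably a smoother, unconstrained ellipse parameterization)
# and wrapped in a GaussianMixtureEllipsePSF before re-fitting.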
epsfs = []
plt.clf()
for i,(mog,psfimg) in enumerate(zip(orig_mogs, origpsfimgs)):
print
print
ells = [EllipseESoft.fromCovariance(cov)
for cov in mog.mog.var]
psf = GaussianMixtureEllipsePSF(mog.mog.amp, mog.mog.mean, ells)
tim = Image(data=psfimg, invvar=1e6*np.ones_like(psfimg),
psf=psf)
epsfs.append(psf)
h,w = psfimg.shape
src = PointSource(PixPos(w/2,h/2), Flux(1.))
tr = Tractor([tim], [src])
tr.freezeParam('catalog')
tim.freezeAllBut('psf')
print 'PSF', psf
for step in range(20):
dlnp,X,alpha = tr.optimize(shared_params=False)
print 'dlnp', dlnp, 'psf', psf
if dlnp < 1e-6:
break
sz = w/2
mogimg = psf.getPointSourcePatch(0., 0., radius=sz)
mogimg = mogimg.patch
mx = 0.002
plt.subplot(rows, cols, 1+i)
dimshow(psfimg - mogimg, ticks=False, vmin=-mx, vmax=mx)
plt.suptitle('PixPSF - MoG (ellipse)')
ps.savefig()
# Update the 'tims' with these newly-found PSFs
for psf,tim in zip(epsfs, tims):
tim.mogpsf = tim.psf
tim.psf = psf
# Image - MoG model
# plt.clf()
# for i,(subimg,tim,src) in enumerate(zip(
# subimgs, tims, srcs)):
# tr = Tractor([tim],[src])
# plt.subplot(rows, cols, 1+i)
# mod = tr.getModelImage(0)
# dimshow(mod, ticks=False)
# #plt.colorbar()
# plt.suptitle('MoG (ellipse) model')
# ps.savefig()
plt.clf()
for i,(subimg,tim,src,psf) in enumerate(zip(
subimgs, tims, srcs,epsfs)):
print 'Source:', src
print 'PSF:', tim.getPsf()
print 'subimage sum:', subimg.sum()
tr = Tractor([tim],[src])
plt.subplot(rows, cols, 1+i)
mod = tr.getModelImage(0)
print 'mod sum:', mod.sum()
dimshow(subimg - mod, ticks=False, vmin=-5, vmax=5)
plt.suptitle('Image - MoG (ellipse) model')
ps.savefig()
| gpl-2.0 |
prodromou87/gem5 | util/stats/output.py | 90 | 7981 | # Copyright (c) 2005-2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from chart import ChartOptions
class StatOutput(ChartOptions):
def __init__(self, jobfile, info, stat=None):
super(StatOutput, self).__init__()
self.jobfile = jobfile
self.stat = stat
self.invert = False
self.info = info
    def display(self, name, printmode = 'G'):
        import info
        for job in self.jobfile.jobs():
            value = self.info.get(job, self.stat)
            if value is None:
                return
            if not isinstance(value, list):
                value = [ value ]
            if self.invert:
                for i,val in enumerate(value):
                    if val != 0.0:
                        value[i] = 1 / val
            # choose the output format once this job's values are known
            if printmode == 'G':
                valformat = '%g'
            elif printmode != 'F' and max(value) > 1e6:
                valformat = '%0.5e'
            else:
                valformat = '%f'
            valstring = ', '.join([ valformat % val for val in value ])
            print '%-50s %s' % (job.name + ':', valstring)
def graph(self, name, graphdir, proxy=None):
from os.path import expanduser, isdir, join as joinpath
from barchart import BarChart
from matplotlib.numerix import Float, array, zeros
import os, re, urllib
from jobfile import crossproduct
confgroups = self.jobfile.groups()
ngroups = len(confgroups)
skiplist = [ False ] * ngroups
groupopts = []
baropts = []
groups = []
for i,group in enumerate(confgroups):
if group.flags.graph_group:
groupopts.append(group.subopts())
skiplist[i] = True
elif group.flags.graph_bars:
baropts.append(group.subopts())
skiplist[i] = True
else:
groups.append(group)
has_group = bool(groupopts)
if has_group:
groupopts = [ group for group in crossproduct(groupopts) ]
else:
groupopts = [ None ]
if baropts:
baropts = [ bar for bar in crossproduct(baropts) ]
else:
raise AttributeError, 'No group selected for graph bars'
directory = expanduser(graphdir)
if not isdir(directory):
os.mkdir(directory)
html = file(joinpath(directory, '%s.html' % name), 'w')
print >>html, '<html>'
print >>html, '<title>Graphs for %s</title>' % name
print >>html, '<body>'
html.flush()
for options in self.jobfile.options(groups):
chart = BarChart(self)
data = [ [ None ] * len(baropts) for i in xrange(len(groupopts)) ]
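            # data[group][bar] holds a scalar for this stat, or a list of
            # per-category values when the stat turns out to be stacked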
enabled = False
stacked = 0
for g,gopt in enumerate(groupopts):
for b,bopt in enumerate(baropts):
if gopt is None:
gopt = []
job = self.jobfile.job(options + gopt + bopt)
if not job:
continue
if proxy:
import db
proxy.dict['system'] = self.info[job.system]
val = self.info.get(job, self.stat)
if val is None:
print 'stat "%s" for job "%s" not found' % \
(self.stat, job)
if isinstance(val, (list, tuple)):
if len(val) == 1:
val = val[0]
else:
stacked = len(val)
data[g][b] = val
if stacked == 0:
for i in xrange(len(groupopts)):
for j in xrange(len(baropts)):
if data[i][j] is None:
data[i][j] = 0.0
else:
for i in xrange(len(groupopts)):
for j in xrange(len(baropts)):
val = data[i][j]
if val is None:
data[i][j] = [ 0.0 ] * stacked
elif len(val) != stacked:
raise ValueError, "some stats stacked, some not"
data = array(data)
if data.sum() == 0:
continue
dim = len(data.shape)
x = data.shape[0]
xkeep = [ i for i in xrange(x) if data[i].sum() != 0 ]
y = data.shape[1]
ykeep = [ i for i in xrange(y) if data[:,i].sum() != 0 ]
data = data.take(xkeep, axis=0)
data = data.take(ykeep, axis=1)
if not has_group:
data = data.take([ 0 ], axis=0)
chart.data = data
bopts = [ baropts[i] for i in ykeep ]
bdescs = [ ' '.join([o.desc for o in opt]) for opt in bopts]
if has_group:
gopts = [ groupopts[i] for i in xkeep ]
gdescs = [ ' '.join([o.desc for o in opt]) for opt in gopts]
if chart.legend is None:
if stacked:
try:
chart.legend = self.info.rcategories
except:
chart.legend = [ str(i) for i in xrange(stacked) ]
else:
chart.legend = bdescs
if chart.xticks is None:
if has_group:
chart.xticks = gdescs
else:
chart.xticks = []
chart.graph()
names = [ opt.name for opt in options ]
descs = [ opt.desc for opt in options ]
if names[0] == 'run':
names = names[1:]
descs = descs[1:]
basename = '%s-%s' % (name, ':'.join(names))
desc = ' '.join(descs)
pngname = '%s.png' % basename
psname = '%s.eps' % re.sub(':', '-', basename)
epsname = '%s.ps' % re.sub(':', '-', basename)
chart.savefig(joinpath(directory, pngname))
chart.savefig(joinpath(directory, epsname))
chart.savefig(joinpath(directory, psname))
html_name = urllib.quote(pngname)
print >>html, '''%s<br><img src="%s"><br>''' % (desc, html_name)
html.flush()
print >>html, '</body>'
print >>html, '</html>'
html.close()
| bsd-3-clause |
toobaz/pandas | pandas/tests/groupby/test_counting.py | 2 | 7697 | from itertools import product
import numpy as np
import pytest
from pandas import DataFrame, MultiIndex, Period, Series, Timedelta, Timestamp
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestCounting:
def test_cumcount(self):
df = DataFrame([["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"])
g = df.groupby("A")
sg = g.A
expected = Series([0, 1, 2, 0, 3])
assert_series_equal(expected, g.cumcount())
assert_series_equal(expected, sg.cumcount())
def test_cumcount_empty(self):
ge = DataFrame().groupby(level=0)
se = Series().groupby(level=0)
# edge case, as this is usually considered float
e = Series(dtype="int64")
assert_series_equal(e, ge.cumcount())
assert_series_equal(e, se.cumcount())
def test_cumcount_dupe_index(self):
df = DataFrame(
[["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=[0] * 5
)
g = df.groupby("A")
sg = g.A
expected = Series([0, 1, 2, 0, 3], index=[0] * 5)
assert_series_equal(expected, g.cumcount())
assert_series_equal(expected, sg.cumcount())
def test_cumcount_mi(self):
mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
df = DataFrame([["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=mi)
g = df.groupby("A")
sg = g.A
expected = Series([0, 1, 2, 0, 3], index=mi)
assert_series_equal(expected, g.cumcount())
assert_series_equal(expected, sg.cumcount())
def test_cumcount_groupby_not_col(self):
df = DataFrame(
[["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=[0] * 5
)
g = df.groupby([0, 0, 0, 1, 0])
sg = g.A
expected = Series([0, 1, 2, 0, 3], index=[0] * 5)
assert_series_equal(expected, g.cumcount())
assert_series_equal(expected, sg.cumcount())
def test_ngroup(self):
df = DataFrame({"A": list("aaaba")})
g = df.groupby("A")
sg = g.A
expected = Series([0, 0, 0, 1, 0])
assert_series_equal(expected, g.ngroup())
assert_series_equal(expected, sg.ngroup())
def test_ngroup_distinct(self):
df = DataFrame({"A": list("abcde")})
g = df.groupby("A")
sg = g.A
expected = Series(range(5), dtype="int64")
assert_series_equal(expected, g.ngroup())
assert_series_equal(expected, sg.ngroup())
def test_ngroup_one_group(self):
df = DataFrame({"A": [0] * 5})
g = df.groupby("A")
sg = g.A
expected = Series([0] * 5)
assert_series_equal(expected, g.ngroup())
assert_series_equal(expected, sg.ngroup())
def test_ngroup_empty(self):
ge = DataFrame().groupby(level=0)
se = Series().groupby(level=0)
# edge case, as this is usually considered float
e = Series(dtype="int64")
assert_series_equal(e, ge.ngroup())
assert_series_equal(e, se.ngroup())
def test_ngroup_series_matches_frame(self):
df = DataFrame({"A": list("aaaba")})
s = Series(list("aaaba"))
assert_series_equal(df.groupby(s).ngroup(), s.groupby(s).ngroup())
def test_ngroup_dupe_index(self):
df = DataFrame({"A": list("aaaba")}, index=[0] * 5)
g = df.groupby("A")
sg = g.A
expected = Series([0, 0, 0, 1, 0], index=[0] * 5)
assert_series_equal(expected, g.ngroup())
assert_series_equal(expected, sg.ngroup())
def test_ngroup_mi(self):
mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
df = DataFrame({"A": list("aaaba")}, index=mi)
g = df.groupby("A")
sg = g.A
expected = Series([0, 0, 0, 1, 0], index=mi)
assert_series_equal(expected, g.ngroup())
assert_series_equal(expected, sg.ngroup())
def test_ngroup_groupby_not_col(self):
df = DataFrame({"A": list("aaaba")}, index=[0] * 5)
g = df.groupby([0, 0, 0, 1, 0])
sg = g.A
expected = Series([0, 0, 0, 1, 0], index=[0] * 5)
assert_series_equal(expected, g.ngroup())
assert_series_equal(expected, sg.ngroup())
def test_ngroup_descending(self):
df = DataFrame(["a", "a", "b", "a", "b"], columns=["A"])
g = df.groupby(["A"])
ascending = Series([0, 0, 1, 0, 1])
descending = Series([1, 1, 0, 1, 0])
assert_series_equal(descending, (g.ngroups - 1) - ascending)
assert_series_equal(ascending, g.ngroup(ascending=True))
assert_series_equal(descending, g.ngroup(ascending=False))
def test_ngroup_matches_cumcount(self):
# verify one manually-worked out case works
df = DataFrame(
[["a", "x"], ["a", "y"], ["b", "x"], ["a", "x"], ["b", "y"]],
columns=["A", "X"],
)
g = df.groupby(["A", "X"])
g_ngroup = g.ngroup()
g_cumcount = g.cumcount()
expected_ngroup = Series([0, 1, 2, 0, 3])
expected_cumcount = Series([0, 0, 0, 1, 0])
assert_series_equal(g_ngroup, expected_ngroup)
assert_series_equal(g_cumcount, expected_cumcount)
def test_ngroup_cumcount_pair(self):
# brute force comparison for all small series
for p in product(range(3), repeat=4):
df = DataFrame({"a": p})
g = df.groupby(["a"])
order = sorted(set(p))
ngroupd = [order.index(val) for val in p]
cumcounted = [p[:i].count(val) for i, val in enumerate(p)]
assert_series_equal(g.ngroup(), Series(ngroupd))
assert_series_equal(g.cumcount(), Series(cumcounted))
def test_ngroup_respects_groupby_order(self):
np.random.seed(0)
df = DataFrame({"a": np.random.choice(list("abcdef"), 100)})
for sort_flag in (False, True):
g = df.groupby(["a"], sort=sort_flag)
df["group_id"] = -1
df["group_index"] = -1
for i, (_, group) in enumerate(g):
df.loc[group.index, "group_id"] = i
for j, ind in enumerate(group.index):
df.loc[ind, "group_index"] = j
assert_series_equal(Series(df["group_id"].values), g.ngroup())
assert_series_equal(Series(df["group_index"].values), g.cumcount())
@pytest.mark.parametrize(
"datetimelike",
[
[
Timestamp("2016-05-{i:02d} 20:09:25+00:00".format(i=i))
for i in range(1, 4)
],
[Timestamp("2016-05-{i:02d} 20:09:25".format(i=i)) for i in range(1, 4)],
[Timedelta(x, unit="h") for x in range(1, 4)],
[Period(freq="2W", year=2017, month=x) for x in range(1, 4)],
],
)
def test_count_with_datetimelike(self, datetimelike):
# test for #13393, where DataframeGroupBy.count() fails
# when counting a datetimelike column.
df = DataFrame({"x": ["a", "a", "b"], "y": datetimelike})
res = df.groupby("x").count()
expected = DataFrame({"y": [2, 1]}, index=["a", "b"])
expected.index.name = "x"
assert_frame_equal(expected, res)
def test_count_with_only_nans_in_first_group(self):
# GH21956
df = DataFrame({"A": [np.nan, np.nan], "B": ["a", "b"], "C": [1, 2]})
result = df.groupby(["A", "B"]).C.count()
mi = MultiIndex(levels=[[], ["a", "b"]], codes=[[], []], names=["A", "B"])
expected = Series([], index=mi, dtype=np.int64, name="C")
assert_series_equal(result, expected, check_index_type=False)
| bsd-3-clause |
ashhher3/scikit-learn | sklearn/linear_model/ransac.py | 22 | 14007 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
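# A rough worked example of the formula above (numbers assumed for
# illustration): with 80 inliers out of 100 samples and min_samples=2 the
# inlier ratio is 0.8, so
#     _dynamic_max_trials(80, 100, 2, 0.99)
#     == ceil(log(1 - 0.99) / log(1 - 0.8 ** 2)) == 5.0
# i.e. about five random draws give 99% confidence of one outlier-free subset.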
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
relative number `ceil(min_samples * X.shape[0]`) for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
Stop iteration if score is greater equal than this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
data is sampled in RANSAC. This requires to generate at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
def fit(self, X, y):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
if y.ndim == 1:
y = y.reshape(-1, 1)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is None:
residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
else:
residual_metric = self.residual_metric
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
base_estimator.fit(X_subset, y_subset)
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
if y_pred.ndim == 1:
y_pred = y_pred[:, None]
residuals_subset = residual_metric(y_pred - y)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# less inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find valid consensus set, because"
" either the `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly ""chosen sub-samples. Consider "
"relaxing the ""constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
| bsd-3-clause |
carrillo/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
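# A rough, self-contained sketch of the sampling process described in the
# module docstring (illustrative only -- the data plotted below comes from
# make_multilabel_classification, and the parameter names here are assumed).
def sample_document_sketch(theta, theta_c, n_labels=2, length=50,
                           rng=np.random):
    # pick the number of labels: n ~ Poisson(n_labels); the real generator
    # uses rejection sampling, here we simply cap at the number of classes
    n = min(rng.poisson(n_labels), len(theta))
    # n times, choose a class c ~ Multinomial(theta), rejecting repeats
    classes = set()
    while len(classes) < n:
        classes.add(rng.choice(len(theta), p=theta))
    # pick the document length: k ~ Poisson(length), rejecting zero
    k = 0
    while k == 0:
        k = rng.poisson(length)
    # k times, choose a word w ~ Multinomial(theta_c) for one chosen class
    words = rng.choice(len(theta_c), size=k, p=theta_c)
    return sorted(classes), words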
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
louispotok/pandas | pandas/tests/io/parser/index_col.py | 20 | 5352 | # -*- coding: utf-8 -*-
"""
Tests that the specified index column (a.k.a 'index_col')
is properly handled or inferred during parsing for all of
the parsers defined in parsers.py
"""
import pytest
import pandas.util.testing as tm
from pandas import DataFrame, Index, MultiIndex
from pandas.compat import StringIO
class IndexColTests(object):
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n" # noqa
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
pytest.raises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
assert xp.index.name == rs.index.name
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
assert xp.index.name == rs.index.name
def test_index_col_is_true(self):
# see gh-9798
pytest.raises(ValueError, self.read_csv,
StringIO(self.ts_data), index_col=True)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
assert data.index.equals(Index(['foo', 'bar', 'baz']))
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame(
[], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col),
expected, check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(
data), index_col=index_col),
expected, check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col),
expected, check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(
data), index_col=index_col),
expected, check_index_type=False)
def test_empty_with_index_col_false(self):
# see gh-10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
AlexCatarino/Lean | Algorithm.Python/WarmupHistoryAlgorithm.py | 3 | 2886 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### This algorithm demonstrates using the history provider to retrieve data
### to warm up indicators before data is received.
### </summary>
### <meta name="tag" content="indicators" />
### <meta name="tag" content="history" />
### <meta name="tag" content="history and warm up" />
### <meta name="tag" content="using data" />
class WarmupHistoryAlgorithm(QCAlgorithm):
def Initialize(self):
'''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''
self.SetStartDate(2014,5,2) #Set Start Date
self.SetEndDate(2014,5,2) #Set End Date
self.SetCash(100000) #Set Strategy Cash
# Find more symbols here: http://quantconnect.com/data
forex = self.AddForex("EURUSD", Resolution.Second)
forex = self.AddForex("NZDUSD", Resolution.Second)
fast_period = 60
slow_period = 3600
self.fast = self.EMA("EURUSD", fast_period)
self.slow = self.EMA("EURUSD", slow_period)
# "slow_period + 1" because rolling window waits for one to fall off the back to be considered ready
        # History returns a pandas.DataFrame indexed by symbol and time
history = self.History(["EURUSD", "NZDUSD"], slow_period + 1)
# prints out the tail of the dataframe
self.Log(str(history.loc["EURUSD"].tail()))
self.Log(str(history.loc["NZDUSD"].tail()))
for index, row in history.loc["EURUSD"].iterrows():
self.fast.Update(index, row["close"])
self.slow.Update(index, row["close"])
self.Log("FAST {0} READY. Samples: {1}".format("IS" if self.fast.IsReady else "IS NOT", self.fast.Samples))
self.Log("SLOW {0} READY. Samples: {1}".format("IS" if self.slow.IsReady else "IS NOT", self.slow.Samples))
def OnData(self, data):
'''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.'''
if self.fast.Current.Value > self.slow.Current.Value:
self.SetHoldings("EURUSD", 1)
else:
self.SetHoldings("EURUSD", -1)
| apache-2.0 |
e-q/scipy | scipy/spatial/kdtree.py | 3 | 24169 | # Copyright Anne M. Archibald 2008
# Released under the scipy license
import numpy as np
import warnings
from .ckdtree import cKDTree, cKDTreeNode
__all__ = ['minkowski_distance_p', 'minkowski_distance',
'distance_matrix',
'Rectangle', 'KDTree']
def minkowski_distance_p(x, y, p=2):
"""
Compute the pth power of the L**p distance between two arrays.
For efficiency, this function computes the L**p distance but does
not extract the pth root. If `p` is 1 or infinity, this is equal to
the actual L**p distance.
Parameters
----------
x : (M, K) array_like
Input array.
y : (N, K) array_like
Input array.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
Examples
--------
>>> from scipy.spatial import minkowski_distance_p
>>> minkowski_distance_p([[0,0],[0,0]], [[1,1],[0,1]])
array([2, 1])
"""
x = np.asarray(x)
y = np.asarray(y)
# Find smallest common datatype with float64 (return type of this function) - addresses #10262.
# Don't just cast to float64 for complex input case.
common_datatype = np.promote_types(np.promote_types(x.dtype, y.dtype), 'float64')
# Make sure x and y are NumPy arrays of correct datatype.
x = x.astype(common_datatype)
y = y.astype(common_datatype)
if p == np.inf:
return np.amax(np.abs(y-x), axis=-1)
elif p == 1:
return np.sum(np.abs(y-x), axis=-1)
else:
return np.sum(np.abs(y-x)**p, axis=-1)
def minkowski_distance(x, y, p=2):
"""
Compute the L**p distance between two arrays.
Parameters
----------
x : (M, K) array_like
Input array.
y : (N, K) array_like
Input array.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
Examples
--------
>>> from scipy.spatial import minkowski_distance
>>> minkowski_distance([[0,0],[0,0]], [[1,1],[0,1]])
array([ 1.41421356, 1. ])
"""
x = np.asarray(x)
y = np.asarray(y)
if p == np.inf or p == 1:
return minkowski_distance_p(x, y, p)
else:
return minkowski_distance_p(x, y, p)**(1./p)
class Rectangle(object):
"""Hyperrectangle class.
Represents a Cartesian product of intervals.
"""
def __init__(self, maxes, mins):
"""Construct a hyperrectangle."""
self.maxes = np.maximum(maxes,mins).astype(float)
self.mins = np.minimum(maxes,mins).astype(float)
self.m, = self.maxes.shape
def __repr__(self):
return "<Rectangle %s>" % list(zip(self.mins, self.maxes))
def volume(self):
"""Total volume."""
return np.prod(self.maxes-self.mins)
def split(self, d, split):
"""
Produce two hyperrectangles by splitting.
In general, if you need to compute maximum and minimum
distances to the children, it can be done more efficiently
by updating the maximum and minimum distances to the parent.
Parameters
----------
d : int
Axis to split hyperrectangle along.
split : float
Position along axis `d` to split at.
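        Examples
        --------
        A minimal illustration (coordinates assumed):
        >>> rect = Rectangle([1., 1.], [0., 0.])
        >>> less, greater = rect.split(0, 0.5)   # split along axis 0 at 0.5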
"""
mid = np.copy(self.maxes)
mid[d] = split
less = Rectangle(self.mins, mid)
mid = np.copy(self.mins)
mid[d] = split
greater = Rectangle(mid, self.maxes)
return less, greater
def min_distance_point(self, x, p=2.):
"""
Return the minimum distance between input and points in the hyperrectangle.
Parameters
----------
x : array_like
Input.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-x,x-self.maxes)),p)
def max_distance_point(self, x, p=2.):
"""
Return the maximum distance between input and points in the hyperrectangle.
Parameters
----------
x : array_like
Input array.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(self.maxes-x,x-self.mins),p)
def min_distance_rectangle(self, other, p=2.):
"""
Compute the minimum distance between points in the two hyperrectangles.
Parameters
----------
other : hyperrectangle
Input.
p : float
Input.
"""
return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-other.maxes,other.mins-self.maxes)),p)
def max_distance_rectangle(self, other, p=2.):
"""
Compute the maximum distance between points in the two hyperrectangles.
Parameters
----------
other : hyperrectangle
Input.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(self.maxes-other.mins,other.maxes-self.mins),p)
class KDTree(cKDTree):
"""
kd-tree for quick nearest-neighbor lookup
This class provides an index into a set of k-D points which
can be used to rapidly look up the nearest neighbors of any point.
Parameters
----------
data : (N,K) array_like
The data points to be indexed. This array is not copied, and
so modifying this data will result in bogus results.
leafsize : int, optional
The number of points at which the algorithm switches over to
brute-force. Has to be positive.
Notes
-----
The algorithm used is described in Maneewongvatana and Mount 1999.
The general idea is that the kd-tree is a binary tree, each of whose
nodes represents an axis-aligned hyperrectangle. Each node specifies
an axis and splits the set of points based on whether their coordinate
along that axis is greater than or less than a particular value.
During construction, the axis and splitting point are chosen by the
"sliding midpoint" rule, which ensures that the cells do not all
become long and thin.
The tree can be queried for the r closest neighbors of any given point
(optionally returning only those within some maximum distance of the
point). It can also be queried, with a substantial gain in efficiency,
for the r approximate closest neighbors.
For large dimensions (20 is already large) do not expect this to run
significantly faster than brute force. High-dimensional nearest-neighbor
queries are a substantial open problem in computer science.
The tree also supports all-neighbors queries, both with arrays of points
and with other kd-trees. These do use a reasonably efficient algorithm,
but the kd-tree is not necessarily the best data structure for this
sort of calculation.
"""
class node:
@staticmethod
def _create(ckdtree_node=None):
"""Create either an inner or leaf node, wrapping a cKDTreeNode instance"""
if ckdtree_node is None:
return KDTree.node(ckdtree_node)
elif ckdtree_node.split_dim == -1:
return KDTree.leafnode(ckdtree_node)
else:
return KDTree.innernode(ckdtree_node)
def __init__(self, ckdtree_node=None):
if ckdtree_node is None:
ckdtree_node = cKDTreeNode()
self._node = ckdtree_node
def __lt__(self, other):
return id(self) < id(other)
def __gt__(self, other):
return id(self) > id(other)
def __le__(self, other):
return id(self) <= id(other)
def __ge__(self, other):
return id(self) >= id(other)
def __eq__(self, other):
return id(self) == id(other)
class leafnode(node):
@property
def idx(self):
return self._node.indices
@property
def children(self):
return self._node.children
class innernode(node):
def __init__(self, ckdtreenode):
assert isinstance(ckdtreenode, cKDTreeNode)
super().__init__(ckdtreenode)
self.less = KDTree.node._create(ckdtreenode.lesser)
self.greater = KDTree.node._create(ckdtreenode.greater)
@property
def split_dim(self):
return self._node.split_dim
@property
def split(self):
return self._node.split
@property
def children(self):
return self._node.children
@property
def tree(self):
if not hasattr(self, "_tree"):
self._tree = KDTree.node._create(super().tree)
return self._tree
def __init__(self, data, leafsize=10):
data = np.asarray(data)
if data.dtype.kind == 'c':
raise TypeError("KDTree does not work with complex data")
# Note KDTree has different default leafsize from cKDTree
super().__init__(data, leafsize)
def query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
"""
Query the kd-tree for nearest neighbors
Parameters
----------
x : array_like, last dimension self.m
An array of points to query.
k : int, optional
The number of nearest neighbors to return.
eps : nonnegative float, optional
Return approximate nearest neighbors; the kth returned value
is guaranteed to be no further than (1+eps) times the
distance to the real kth nearest neighbor.
p : float, 1<=p<=infinity, optional
Which Minkowski p-norm to use.
1 is the sum-of-absolute-values "Manhattan" distance
2 is the usual Euclidean distance
infinity is the maximum-coordinate-difference distance
distance_upper_bound : nonnegative float, optional
Return only neighbors within this distance. This is used to prune
tree searches, so if you are doing a series of nearest-neighbor
queries, it may help to supply the distance to the nearest neighbor
of the most recent point.
Returns
-------
d : float or array of floats
The distances to the nearest neighbors.
If x has shape tuple+(self.m,), then d has shape tuple if
k is one, or tuple+(k,) if k is larger than one. Missing
neighbors (e.g. when k > n or distance_upper_bound is
given) are indicated with infinite distances. If k is None,
then d is an object array of shape tuple, containing lists
of distances. In either case the hits are sorted by distance
(nearest first).
i : integer or array of integers
The locations of the neighbors in self.data. i is the same
shape as d.
Examples
--------
>>> from scipy import spatial
>>> x, y = np.mgrid[0:5, 2:8]
>>> tree = spatial.KDTree(list(zip(x.ravel(), y.ravel())))
>>> tree.data
array([[0, 2],
[0, 3],
[0, 4],
[0, 5],
[0, 6],
[0, 7],
[1, 2],
[1, 3],
[1, 4],
[1, 5],
[1, 6],
[1, 7],
[2, 2],
[2, 3],
[2, 4],
[2, 5],
[2, 6],
[2, 7],
[3, 2],
[3, 3],
[3, 4],
[3, 5],
[3, 6],
[3, 7],
[4, 2],
[4, 3],
[4, 4],
[4, 5],
[4, 6],
[4, 7]])
>>> pts = np.array([[0, 0], [2.1, 2.9]])
>>> tree.query(pts)
(array([ 2. , 0.14142136]), array([ 0, 13]))
>>> tree.query(pts[0])
(2.0, 0)
"""
x = np.asarray(x)
if x.dtype.kind == 'c':
raise TypeError("KDTree does not work with complex data")
if k is None:
# k=None, return all neighbors
warnings.warn(
"KDTree.query with k=None is deprecated and will be removed "
"in SciPy 1.8.0. Use KDTree.query_ball_point instead.",
DeprecationWarning)
# Convert index query to a lists of distance and index,
# sorted by distance
def inds_to_hits(point, neighbors):
dist = minkowski_distance(point, self.data[neighbors], p)
hits = sorted([(d, i) for d, i in zip(dist, neighbors)])
return [d for d, i in hits], [i for d, i in hits]
x = np.asarray(x, dtype=np.float64)
inds = super().query_ball_point(x, distance_upper_bound, p, eps)
if isinstance(inds, list):
return inds_to_hits(x, inds)
dists = np.empty_like(inds)
for idx in np.ndindex(inds.shape):
dists[idx], inds[idx] = inds_to_hits(x[idx], inds[idx])
return dists, inds
d, i = super().query(x, k, eps, p, distance_upper_bound)
if isinstance(i, int):
i = np.intp(i)
return d, i
def query_ball_point(self, x, r, p=2., eps=0):
"""Find all points within distance r of point(s) x.
Parameters
----------
x : array_like, shape tuple + (self.m,)
The point or points to search for neighbors of.
r : positive float
The radius of points to return.
p : float, optional
Which Minkowski p-norm to use. Should be in the range [1, inf].
eps : nonnegative float, optional
Approximate search. Branches of the tree are not explored if their
nearest points are further than ``r / (1 + eps)``, and branches are
added in bulk if their furthest points are nearer than
``r * (1 + eps)``.
Returns
-------
results : list or array of lists
If `x` is a single point, returns a list of the indices of the
neighbors of `x`. If `x` is an array of points, returns an object
array of shape tuple containing lists of neighbors.
Notes
-----
If you have many points whose neighbors you want to find, you may save
substantial amounts of time by putting them in a KDTree and using
query_ball_tree.
Examples
--------
>>> from scipy import spatial
>>> x, y = np.mgrid[0:5, 0:5]
>>> points = np.c_[x.ravel(), y.ravel()]
>>> tree = spatial.KDTree(points)
>>> sorted(tree.query_ball_point([2, 0], 1))
[5, 10, 11, 15]
Query multiple points and plot the results:
>>> import matplotlib.pyplot as plt
>>> points = np.asarray(points)
>>> plt.plot(points[:,0], points[:,1], '.')
>>> for results in tree.query_ball_point(([2, 0], [3, 3]), 1):
... nearby_points = points[results]
... plt.plot(nearby_points[:,0], nearby_points[:,1], 'o')
>>> plt.margins(0.1, 0.1)
>>> plt.show()
"""
x = np.asarray(x)
if x.dtype.kind == 'c':
raise TypeError("KDTree does not work with complex data")
return super().query_ball_point(x, r, p, eps)
def query_ball_tree(self, other, r, p=2., eps=0):
"""
Find all pairs of points between `self` and `other` whose distance is at most r
Parameters
----------
other : KDTree instance
The tree containing points to search against.
r : float
The maximum distance, has to be positive.
p : float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
eps : float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
Returns
-------
results : list of lists
For each element ``self.data[i]`` of this tree, ``results[i]`` is a
list of the indices of its neighbors in ``other.data``.
Examples
--------
You can search all pairs of points between two kd-trees within a distance:
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from scipy.spatial import KDTree
>>> np.random.seed(21701)
>>> points1 = np.random.random((15, 2))
>>> points2 = np.random.random((15, 2))
>>> plt.figure(figsize=(6, 6))
>>> plt.plot(points1[:, 0], points1[:, 1], "xk", markersize=14)
>>> plt.plot(points2[:, 0], points2[:, 1], "og", markersize=14)
>>> kd_tree1 = KDTree(points1)
>>> kd_tree2 = KDTree(points2)
>>> indexes = kd_tree1.query_ball_tree(kd_tree2, r=0.2)
>>> for i in range(len(indexes)):
... for j in indexes[i]:
... plt.plot([points1[i, 0], points2[j, 0]],
... [points1[i, 1], points2[j, 1]], "-r")
>>> plt.show()
"""
return super().query_ball_tree(other, r, p, eps)
def query_pairs(self, r, p=2., eps=0):
"""
Find all pairs of points in `self` whose distance is at most r.
Parameters
----------
r : positive float
The maximum distance.
p : float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
eps : float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
Returns
-------
results : set
Set of pairs ``(i,j)``, with ``i < j``, for which the corresponding
positions are close.
Examples
--------
You can search all pairs of points in a kd-tree within a distance:
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from scipy.spatial import KDTree
>>> np.random.seed(21701)
>>> points = np.random.random((20, 2))
>>> plt.figure(figsize=(6, 6))
>>> plt.plot(points[:, 0], points[:, 1], "xk", markersize=14)
>>> kd_tree = KDTree(points)
>>> pairs = kd_tree.query_pairs(r=0.2)
>>> for (i, j) in pairs:
... plt.plot([points[i, 0], points[j, 0]],
... [points[i, 1], points[j, 1]], "-r")
>>> plt.show()
"""
return super().query_pairs(r, p, eps)
def count_neighbors(self, other, r, p=2.):
"""
Count how many nearby pairs can be formed.
Count the number of pairs (x1,x2) can be formed, with x1 drawn
from self and x2 drawn from ``other``, and where
``distance(x1, x2, p) <= r``.
This is the "two-point correlation" described in Gray and Moore 2000,
"N-body problems in statistical learning", and the code here is based
on their algorithm.
Parameters
----------
other : KDTree instance
The other tree to draw points from.
r : float or one-dimensional array of floats
The radius to produce a count for. Multiple radii are searched with
a single tree traversal.
p : float, 1<=p<=infinity, optional
Which Minkowski p-norm to use
Returns
-------
result : int or 1-D array of ints
The number of pairs.
Examples
--------
        You can count neighbor pairs between two kd-trees within a distance:
>>> import numpy as np
>>> from scipy.spatial import KDTree
>>> np.random.seed(21701)
>>> points1 = np.random.random((5, 2))
>>> points2 = np.random.random((5, 2))
>>> kd_tree1 = KDTree(points1)
>>> kd_tree2 = KDTree(points2)
>>> kd_tree1.count_neighbors(kd_tree2, 0.2)
9
        This number is the same as the total number of pairs found by
`query_ball_tree`:
>>> indexes = kd_tree1.query_ball_tree(kd_tree2, r=0.2)
>>> sum([len(i) for i in indexes])
9
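        Passing an array of radii performs all counts in a single tree
        traversal. The radii below are arbitrary and the counts depend on the
        random points above, so this sketch only checks the result's shape:
        >>> r = np.array([0.1, 0.2, 0.3])
        >>> kd_tree1.count_neighbors(kd_tree2, r).shape
        (3,)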
"""
return super().count_neighbors(other, r, p)
def sparse_distance_matrix(self, other, max_distance, p=2.):
"""
Compute a sparse distance matrix
Computes a distance matrix between two KDTrees, leaving as zero
any distance greater than max_distance.
Parameters
----------
        other : KDTree
            The other tree to compute distances against.
        max_distance : positive float
            Distances greater than this value are not recorded (left as zero).
        p : float, optional
            Which Minkowski p-norm to use. ``1 <= p <= infinity``.
Returns
-------
result : dok_matrix
Sparse matrix representing the results in "dictionary of keys" format.
Examples
--------
You can compute a sparse distance matrix between two kd-trees:
>>> import numpy as np
>>> from scipy.spatial import KDTree
>>> np.random.seed(21701)
>>> points1 = np.random.random((5, 2))
>>> points2 = np.random.random((5, 2))
>>> kd_tree1 = KDTree(points1)
>>> kd_tree2 = KDTree(points2)
>>> sdm = kd_tree1.sparse_distance_matrix(kd_tree2, 0.3)
>>> sdm.toarray()
array([[0.20220215, 0.14538496, 0., 0.10257199, 0. ],
[0.13491385, 0.27251306, 0., 0.18793787, 0. ],
[0.19262396, 0., 0., 0.25795122, 0. ],
[0.14859639, 0.07076002, 0., 0.04065851, 0. ],
[0.17308768, 0., 0., 0.24823138, 0. ]])
        You can check that distances above `max_distance` are zero:
>>> from scipy.spatial import distance_matrix
>>> distance_matrix(points1, points2)
array([[0.20220215, 0.14538496, 0.43588092, 0.10257199, 0.4555495 ],
[0.13491385, 0.27251306, 0.65944131, 0.18793787, 0.68184154],
[0.19262396, 0.34121593, 0.72176889, 0.25795122, 0.74538858],
[0.14859639, 0.07076002, 0.48505773, 0.04065851, 0.50043591],
[0.17308768, 0.32837991, 0.72760803, 0.24823138, 0.75017239]])
"""
return super().sparse_distance_matrix(other, max_distance, p)
def distance_matrix(x, y, p=2, threshold=1000000):
"""
Compute the distance matrix.
Returns the matrix of all pair-wise distances.
Parameters
----------
x : (M, K) array_like
Matrix of M vectors in K dimensions.
y : (N, K) array_like
Matrix of N vectors in K dimensions.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
threshold : positive int
If ``M * N * K`` > `threshold`, algorithm uses a Python loop instead
of large temporary arrays.
Returns
-------
result : (M, N) ndarray
Matrix containing the distance from every vector in `x` to every vector
in `y`.
Examples
--------
>>> from scipy.spatial import distance_matrix
>>> distance_matrix([[0,0],[0,1]], [[1,0],[1,1]])
array([[ 1. , 1.41421356],
[ 1.41421356, 1. ]])
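    A small `threshold` forces the loop-based code path described above; the
    result is expected to be identical (illustrative check):
    >>> import numpy as np
    >>> d1 = distance_matrix([[0,0],[0,1]], [[1,0],[1,1]])
    >>> d2 = distance_matrix([[0,0],[0,1]], [[1,0],[1,1]], threshold=1)
    >>> bool(np.allclose(d1, d2))
    True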
"""
x = np.asarray(x)
m, k = x.shape
y = np.asarray(y)
n, kk = y.shape
if k != kk:
raise ValueError("x contains %d-dimensional vectors but y contains %d-dimensional vectors" % (k, kk))
if m*n*k <= threshold:
return minkowski_distance(x[:,np.newaxis,:],y[np.newaxis,:,:],p)
else:
result = np.empty((m,n),dtype=float) # FIXME: figure out the best dtype
if m < n:
for i in range(m):
result[i,:] = minkowski_distance(x[i],y,p)
else:
for j in range(n):
result[:,j] = minkowski_distance(x,y[j],p)
return result
| bsd-3-clause |
rknLA/sms-tools | lectures/09-Sound-description/plots-code/hpcp.py | 25 | 1194 | import numpy as np
import matplotlib.pyplot as plt
import essentia.standard as ess
M = 1024
N = 1024
H = 512
fs = 44100
spectrum = ess.Spectrum(size=N)
window = ess.Windowing(size=M, type='hann')
spectralPeaks = ess.SpectralPeaks()
hpcp = ess.HPCP()
x = ess.MonoLoader(filename = '../../../sounds/cello-double.wav', sampleRate = fs)()
hpcps = []
for frame in ess.FrameGenerator(x, frameSize=M, hopSize=H, startFromZero=True):
mX = spectrum(window(frame))
spectralPeaks_freqs, spectralPeaks_mags = spectralPeaks(mX)
hpcp_vals = hpcp(spectralPeaks_freqs, spectralPeaks_mags)
hpcps.append(hpcp_vals)
hpcps = np.array(hpcps)
plt.figure(1, figsize=(9.5, 7))
plt.subplot(2,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.title('x (cello-double.wav)')
plt.subplot(2,1,2)
numFrames = int(hpcps[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.pcolormesh(frmTime, np.arange(12), np.transpose(hpcps))
plt.ylabel('spectral bins')
plt.title('HPCP')
plt.autoscale(tight=True)
plt.tight_layout()
plt.savefig('hpcp.png')
plt.show()
| agpl-3.0 |
wogsland/QSTK | QSTK/qstkutil/qsdateutil.py | 5 | 9008 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Jan 1, 2011
@author:Drew Bratcher
@contact: [email protected]
@summary: Date and NYSE trading-day utilities used by the backtester and report code.
'''
import datetime as dt
from datetime import timedelta
import time as t
import numpy as np
import os
import pandas as pd
def _cache_dates():
''' Caches dates '''
try:
# filename = os.environ['QS'] + "/qstkutil/NYSE_dates.txt"
filename = os.path.join(os.path.dirname(__file__), 'NYSE_dates.txt')
except KeyError:
print "Please be sure you have NYSE_dates.txt in the qstkutil directory"
datestxt = np.loadtxt(filename, dtype=str)
dates = []
for i in datestxt:
dates.append(dt.datetime.strptime(i, "%m/%d/%Y"))
return pd.TimeSeries(index=dates, data=dates)
GTS_DATES = _cache_dates()
def getMonthNames():
return(['JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG','SEP','OCT','NOV','DEC'])
def getYears(funds):
years=[]
for date in funds.index:
if(not(date.year in years)):
years.append(date.year)
return(years)
def getMonths(funds,year):
months=[]
for date in funds.index:
if((date.year==year) and not(date.month in months)):
months.append(date.month)
return(months)
def getDays(funds,year,month):
days=[]
for date in funds.index:
if((date.year==year) and (date.month==month)):
days.append(date)
return(days)
def getDaysBetween(ts_start, ts_end):
days=[]
for i in range(0,(ts_end-ts_start).days):
days.append(ts_start+timedelta(days=1)*i)
return(days)
def getFirstDay(funds,year,month):
for date in funds.index:
if((date.year==year) and (date.month==month)):
return(date)
return('ERROR')
def getLastDay(funds,year,month):
return_date = 'ERROR'
for date in funds.index:
if((date.year==year) and (date.month==month)):
return_date = date
return(return_date)
def getNextOptionClose(day, trade_days, offset=0):
#get third friday in month of day
#get first of month
year_off=0
if day.month+offset > 12:
year_off = 1
offset = offset - 12
first = dt.datetime(day.year+year_off, day.month+offset, 1, hour=16)
#get weekday
day_num = first.weekday()
#get first friday (friday - weekday) add 7 if less than 1
dif = 5 - day_num
if dif < 1:
dif = dif+7
#move to third friday
dif = dif + 14
friday = first+dt.timedelta(days=(dif-1))
#if friday is a holiday, options expire then
if friday in trade_days:
month_close = first + dt.timedelta(days=dif)
else:
month_close = friday
#if day is past the day after that
if month_close < day:
return_date = getNextOptionClose(day, trade_days, offset=1)
else:
return_date = month_close
return(return_date)
def getLastOptionClose(day, trade_days):
start = day
while getNextOptionClose(day, trade_days)>=start:
day= day - dt.timedelta(days=1)
return(getNextOptionClose(day, trade_days))
def getNYSEoffset(mark, offset):
''' Returns NYSE date offset by number of days '''
mark = mark.replace(hour=0, minute=0, second=0, microsecond=0)
i = GTS_DATES.index.searchsorted(mark, side='right')
# If there is no exact match, take first date in past
if GTS_DATES[i] != mark:
i -= 1
ret = GTS_DATES[i + offset]
ret = ret.replace(hour=16)
return ret
def getNYSEdays(startday = dt.datetime(1964,7,5), endday = dt.datetime(2020,12,31),
timeofday = dt.timedelta(0)):
"""
@summary: Create a list of timestamps between startday and endday (inclusive)
that correspond to the days there was trading at the NYSE. This function
    depends on a separately created file that lists all days since July 4,
1962 that the NYSE has been open, going forward to 2020 (based
on the holidays that NYSE recognizes).
@param startday: First timestamp to consider (inclusive)
@param endday: Last day to consider (inclusive)
@return list: of timestamps between startday and endday on which NYSE traded
@rtype datetime
"""
start = startday - timeofday
end = endday - timeofday
dates = GTS_DATES[start:end]
ret = [x + timeofday for x in dates]
return(ret)
def getNextNNYSEdays(startday, days, timeofday):
"""
    @summary: Create a list of `days` timestamps, starting at startday,
that correspond to the days there was trading at NYSE. This function
depends on the file used in getNYSEdays and assumes the dates within are
in order.
@param startday: First timestamp to consider (inclusive)
@param days: Number of timestamps to return
@return list: List of timestamps starting at startday on which NYSE traded
@rtype datetime
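    Example (illustrative sketch; assumes the bundled NYSE_dates.txt):
    >>> import datetime as dt
    >>> getNextNNYSEdays(dt.datetime(2010, 1, 4), 2, dt.timedelta(hours=16))
    [datetime.datetime(2010, 1, 4, 16, 0), datetime.datetime(2010, 1, 5, 16, 0)]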
"""
try:
# filename = os.environ['QS'] + "/qstkutil/NYSE_dates.txt"
filename = os.path.join(os.path.dirname(__file__), 'NYSE_dates.txt')
except KeyError:
print "Please be sure to set the value for QS in config.sh or\n"
print "in local.sh and then \'source local.sh\'.\n"
datestxt = np.loadtxt(filename,dtype=str)
dates=[]
for i in datestxt:
if(len(dates)<days):
if((dt.datetime.strptime(i,"%m/%d/%Y")+timeofday)>=startday):
dates.append(dt.datetime.strptime(i,"%m/%d/%Y")+timeofday)
return(dates)
def getPrevNNYSEday(startday, timeofday):
"""
@summary: This function returns the last valid trading day before the start
day, or returns the start day if it is a valid trading day. This function
depends on the file used in getNYSEdays and assumes the dates within are
in order.
@param startday: First timestamp to consider (inclusive)
    @param timeofday: Time offset added to the returned trading day
    @return: The last valid NYSE trading day on or before startday
@rtype datetime
"""
try:
# filename = os.environ['QS'] + "/qstkutil/NYSE_dates.txt"
filename = os.path.join(os.path.dirname(__file__), 'NYSE_dates.txt')
except KeyError:
print "Please be sure to set the value for QS in config.sh or\n"
print "in local.sh and then \'source local.sh\'.\n"
datestxt = np.loadtxt(filename,dtype=str)
#''' Set return to first day '''
dtReturn = dt.datetime.strptime( datestxt[0],"%m/%d/%Y")+timeofday
#''' Loop through all but first '''
for i in datestxt[1:]:
dtNext = dt.datetime.strptime(i,"%m/%d/%Y")
#''' If we are > startday, then use previous valid day '''
if( dtNext > startday ):
break
dtReturn = dtNext + timeofday
return(dtReturn)
def ymd2epoch(year, month, day):
"""
@summary: Convert YMD info into a unix epoch value.
@param year: The year
@param month: The month
@param day: The day
@return epoch: number of seconds since epoch
"""
return(t.mktime(dt.date(year,month,day).timetuple()))
def epoch2date(ts):
"""
@summary Convert seconds since epoch into date
@param ts: Seconds since epoch
@return thedate: A date object
"""
tm = t.gmtime(ts)
return(dt.date(tm.tm_year,tm.tm_mon,tm.tm_mday))
def _trade_dates(dt_start, dt_end, s_period):
'''
@summary: Generate dates on which we need to trade
@param c_strat: Strategy config class
@param dt_start: Start date
@param dt_end: End date
'''
ldt_timestamps = getNYSEdays(dt_start,
dt_end, dt.timedelta(hours=16) )
# Use pandas reindex method instead
# Note, dates are index as well as values, we select based on index
# but return values since it is a numpy array of datetimes instead of
# pandas specific.
ts_dates = pd.TimeSeries(index=ldt_timestamps, data=ldt_timestamps)
# These are the dates we want
if s_period[:2] == 'BW':
# special case for biweekly
dr_range = pd.DateRange(dt_start, dt_end,
timeRule=s_period[1:])
dr_range = np.asarray(dr_range)
li_even = np.array(range(len(dr_range)))
dr_range = dr_range[li_even[li_even % 2 == 0]]
else:
dr_range = pd.DateRange(dt_start, dt_end,
timeRule=s_period)
dr_range = np.asarray(dr_range)
# Warning, we MUST copy the date range, if we modify it it will be returned
# in it's modified form the next time we use it.
dr_range = np.copy(dr_range)
dr_range += pd.DateOffset(hours=16)
ts_dates = ts_dates.reindex( dr_range, method='bfill' )
ldt_dates = ts_dates[ts_dates.notnull()].values
#Make unique
sdt_unique = set()
ldt_dates = [x for x in ldt_dates
if x not in sdt_unique and not sdt_unique.add(x)]
return ldt_dates
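# Illustrative sketch (not part of the original module): a weekly Friday
# rebalance schedule could be generated with the legacy pandas time-rule
# strings this helper expects; 'W@FRI' is an assumed rule name.
#
# ldt_rebalance = _trade_dates(dt.datetime(2010, 1, 1),
#                              dt.datetime(2010, 6, 30), 'W@FRI')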
| bsd-3-clause |
nesterione/scikit-learn | sklearn/__init__.py | 154 | 3014 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| bsd-3-clause |
mmottahedi/neuralnilm_prototype | scripts/e278.py | 2 | 51343 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.objectives import scaled_cost
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=10,
subsample_target=3,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
standardise_input=True,
standardise_targets=True,
input_padding=0,
lag=0,
input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
'std': np.array([ 0.12636775], dtype=np.float32)},
target_stats={
'mean': np.array([ 0.04066789, 0.01881946,
0.24639061, 0.17608672, 0.10273963],
dtype=np.float32),
'std': np.array([ 0.11449792, 0.07338708,
0.26608968, 0.33463112, 0.21250485],
dtype=np.float32)}
)
def change_learning_rate(net, epoch):
net.updates = partial(nesterov_momentum, learning_rate=0.001)
net.compile()
def change_subsample(net, epoch):
net.source.subsample_target = 3
net.generate_validation_data_and_set_shapes()
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=scaled_cost,
updates=partial(nesterov_momentum, learning_rate=0.01),
do_save_activations=True,
epoch_callbacks={501: change_learning_rate}
)
def exp_a(name):
# avg valid cost = 0.5296852589
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_b(name):
# 1 layer, pool after RNN
# avg valid cost = 0.8358715773
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_c(name):
# 2 layers, pool in between
# avg valid cost = 0.5183933973
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=(1/sqrt(N))),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_d(name):
# 3 layers, pool after first layer
# avg valid cost = 0.5396855474
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=(1/sqrt(N))),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=(1/sqrt(N))),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_e(name):
# layerwise pre-training
# avg valid cost = 0.6081719398
source_dict_copy = deepcopy(source_dict)
source_dict_copy['subsample_target'] = 1
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net_dict_copy['layer_changes'] = {
501: {
'remove_from': -3,
'new_layers': [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
},
1001: {
'remove_from': -3,
'callback': change_subsample,
'new_layers': [
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
}
}
net = Net(**net_dict_copy)
return net
def exp_f(name):
# layerwise pre-training (pre-train with pool)
# need to re-run
source_dict_copy = deepcopy(source_dict)
source_dict_copy['subsample_target'] = 1
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net_dict_copy['layer_changes'] = {
501: {
'remove_from': -3,
'callback': change_subsample,
'new_layers': [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
}
]
},
1001: {
'remove_from': -3,
'new_layers': [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
}
}
net = Net(**net_dict_copy)
return net
def exp_g(name):
# sigmoid
# avg valid cost = 1.5114594698
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': sigmoid
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': sigmoid
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': sigmoid
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_h(name):
# ReLU
# doesn't train: training error is 0
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': rectify
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': rectify
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_i(name):
# 2 x dense layers at end (both with 5 units)
# avg valid cost = 0.5794851780
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(source.n_outputs)))
}
]
net = Net(**net_dict_copy)
return net
def exp_j(name):
    # 2 x dense layers at end (penultimate with 50 units)
# avg valid cost = 0.5457109213
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': N,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_k(name):
# RNN output
# doesn't train (0 error)
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': source.n_outputs,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_l(name):
# RNN output, 2 layers
# NaNs
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': source.n_outputs,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': None
},
{
'type': BidirectionalRecurrentLayer,
'num_units': source.n_outputs,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(source.n_outputs)),
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_m(name):
# Conv AND pool, with 50 filters, filter_length=10
# avg valid cost = 0.4936202168
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
subsample_target=3,
input_padding=9
))
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 50,
'filter_length': 10,
'stride': 1,
'nonlinearity': tanh,
'W': Normal(std=1/sqrt(N))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_n(name):
# Conv AND pool, with 10 filters
# avg valid cost = 0.4842122793 (run on 277)
# avg valid cost = 0.5845696926 (run on 278)
# need to re-run with correct initialisations
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
subsample_target=3,
input_padding=2
))
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 10,
'filter_length': 3,
'stride': 1,
'nonlinearity': tanh,
'W': Normal(std=1/sqrt(N))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_o(name):
# 2 lots of conv, then pool
# need to re-run with correct initialisations
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
subsample_target=3,
input_padding=4
))
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 32,
'filter_length': 3,
'stride': 1,
'nonlinearity': tanh,
'W': Normal(std=1/sqrt(N))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 3,
'stride': 1,
'nonlinearity': tanh,
'W': Normal(std=1/sqrt(N))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_p(name):
# 2 lots of conv, then pool
# a re-run of O but with correct initialisations
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
subsample_target=3,
input_padding=4
))
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 32,
'filter_length': 3,
'stride': 1,
'nonlinearity': tanh,
'W': Normal(std=1/sqrt(N))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(32)),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 3,
'stride': 1,
'nonlinearity': tanh,
'W': Normal(std=1/sqrt(50))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(16)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_q(name):
# Conv AND pool, with 10 filters
# avg valid cost = 0.4842122793 (run on 277)
# avg valid cost = 0.5845696926 (run on 278)
# re-run of P but with correct initialisations
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
subsample_target=3,
input_padding=2
))
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 10,
'filter_length': 3,
'stride': 1,
'nonlinearity': tanh,
'W': Normal(std=1/sqrt(N))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(10)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
net.load_params(iteration=1139)
return net
def exp_r(name):
# 2 lots of conv, then pool
# a re-run of O but with 50 filters and correct init
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
subsample_target=3,
input_padding=4
))
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': N,
'filter_length': 3,
'stride': 1,
'nonlinearity': tanh,
'W': Normal(std=1/sqrt(N))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': N,
'filter_length': 3,
'stride': 1,
'nonlinearity': tanh,
'W': Normal(std=1/sqrt(N))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_s(name):
# Conv AND pool, with 50 filters
# avg valid cost = 0.4842122793 (run on 277)
# avg valid cost = 0.5845696926 (run on 278)
# re-run of P but with correct initialisations and 50 filters
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
subsample_target=3,
input_padding=2
))
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': N,
'filter_length': 3,
'stride': 1,
'nonlinearity': tanh,
'W': Normal(std=1/sqrt(N))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_t(name):
# a but with no random seed
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
seed=None
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_u(name):
# a but with no random seed
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
seed=None
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_v(name):
# a
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_w(name):
# 2x2x pool
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
downsample_target=4
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_x(name):
# 3x2x pool
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
downsample_target=6
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net = Net(**net_dict_copy)
return net
def exp_y(name):
# 5-way RNN as penultimate layer
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 5,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(5)))
}
]
net = Net(**net_dict_copy)
return net
def exp_z(name):
# layerwise pre-training
# avg valid cost = 0.6081719398
source_dict_copy = deepcopy(source_dict)
source_dict_copy['subsample_target'] = 1
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
epoch_callbacks={4501: change_learning_rate}
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
net_dict_copy['layer_changes'] = {
2001: {
'remove_from': -3,
'new_layers': [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
},
4001: {
'remove_from': -3,
'callback': change_subsample,
'new_layers': [
{
'type': FeaturePoolLayer,
'ds': 3, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(N)))
}
]
}
}
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('qrstuvwxyz')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=6000 if experiment == 'z' else 2000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
if __name__ == "__main__":
main()
| mit |
vanpact/scipy | scipy/interpolate/fitpack.py | 21 | 46139 | #!/usr/bin/env python
"""
fitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx).
FITPACK is a collection of FORTRAN programs for curve and surface
fitting with splines and tensor product splines.
See
http://www.cs.kuleuven.ac.be/cwis/research/nalag/research/topics/fitpack.html
or
http://www.netlib.org/dierckx/index.html
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
TODO: Make interfaces to the following fitpack functions:
For univariate splines: cocosp, concon, fourco, insert
For bivariate splines: profil, regrid, parsur, surev
"""
from __future__ import division, print_function, absolute_import
__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
import warnings
import numpy as np
from . import _fitpack
from numpy import (atleast_1d, array, ones, zeros, sqrt, ravel, transpose,
empty, iinfo, intc, asarray)
# Try to replace _fitpack interface with
# f2py-generated version
from . import dfitpack
def _intc_overflow(x, msg=None):
"""Cast the value to an intc and raise an OverflowError if the value
cannot fit.
"""
if x > iinfo(intc).max:
if msg is None:
msg = '%r cannot fit into an intc' % x
raise OverflowError(msg)
return intc(x)
_iermess = {
0: ["The spline has a residual sum of squares fp such that "
"abs(fp-s)/s<=0.001", None],
-1: ["The spline is an interpolating spline (fp=0)", None],
-2: ["The spline is weighted least-squares polynomial of degree k.\n"
"fp gives the upper bound fp0 for the smoothing factor s", None],
1: ["The required storage space exceeds the available storage space.\n"
"Probable causes: data (x,y) size is too small or smoothing parameter"
"\ns is too small (fp>s).", ValueError],
2: ["A theoretically impossible result when finding a smoothing spline\n"
"with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)",
ValueError],
3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
"spline with fp=s has been reached. Probable cause: s too small.\n"
"(abs(fp-s)/s>0.001)", ValueError],
10: ["Error on input data", ValueError],
'unknown': ["An error occurred", TypeError]
}
_iermess2 = {
0: ["The spline has a residual sum of squares fp such that "
"abs(fp-s)/s<=0.001", None],
-1: ["The spline is an interpolating spline (fp=0)", None],
-2: ["The spline is weighted least-squares polynomial of degree kx and ky."
"\nfp gives the upper bound fp0 for the smoothing factor s", None],
-3: ["Warning. The coefficients of the spline have been computed as the\n"
"minimal norm least-squares solution of a rank deficient system.",
None],
1: ["The required storage space exceeds the available storage space.\n"
"Probable causes: nxest or nyest too small or s is too small. (fp>s)",
ValueError],
2: ["A theoretically impossible result when finding a smoothing spline\n"
"with fp = s. Probable causes: s too small or badly chosen eps.\n"
"(abs(fp-s)/s>0.001)", ValueError],
3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
"spline with fp=s has been reached. Probable cause: s too small.\n"
"(abs(fp-s)/s>0.001)", ValueError],
4: ["No more knots can be added because the number of B-spline\n"
"coefficients already exceeds the number of data points m.\n"
"Probable causes: either s or m too small. (fp>s)", ValueError],
5: ["No more knots can be added because the additional knot would\n"
"coincide with an old one. Probable cause: s too small or too large\n"
"a weight to an inaccurate data point. (fp>s)", ValueError],
10: ["Error on input data", ValueError],
11: ["rwrk2 too small, i.e. there is not enough workspace for computing\n"
"the minimal least-squares solution of a rank deficient system of\n"
"linear equations.", ValueError],
'unknown': ["An error occurred", TypeError]
}
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc), 'u': array([], float),
'ub': 0, 'ue': 1}
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
full_output=0, nest=None, per=0, quiet=1):
"""
Find the B-spline representation of an N-dimensional curve.
Given a list of N rank-1 arrays, `x`, which represent a curve in
N-dimensional space parametrized by `u`, find a smooth approximating
spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
Parameters
----------
x : array_like
A list of sample vector arrays representing the curve.
w : array_like, optional
Strictly positive rank-1 array of weights the same length as `x[0]`.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the `x` values have standard-deviation given by
the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
u : array_like, optional
An array of parameter values. If not given, these values are
calculated automatically as ``M = len(x[0])``, where
v[0] = 0
v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
u[i] = v[i] / v[M-1]
ub, ue : int, optional
The end-points of the parameters interval. Defaults to
u[0] and u[-1].
k : int, optional
Degree of the spline. Cubic splines are recommended.
Even values of `k` should be avoided especially with a small s-value.
``1 <= k <= 5``, default is 3.
task : int, optional
If task==0 (default), find t and c for a given smoothing factor, s.
If task==1, find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1
for the same set of data.
If task=-1 find the weighted least square spline for a given set of
knots, t.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
where g(x) is the smoothed interpolation of (x,y). The user can
use `s` to control the trade-off between closeness and smoothness
of fit. Larger `s` means more smoothing while smaller values of `s`
indicate less smoothing. Recommended values of `s` depend on the
weights, w. If the weights represent the inverse of the
standard-deviation of y, then a good `s` value should be found in
the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
data points in x, y, and w.
t : int, optional
The knots needed for task=-1.
full_output : int, optional
If non-zero, then return optional outputs.
nest : int, optional
An over-estimate of the total number of knots of the spline to
        help in determining the storage space. By default ``nest = m + 2*k``
        (see the code below); ``nest = m + k + 1`` is always large enough.
per : int, optional
If non-zero, data points are considered periodic with period
``x[m-1] - x[0]`` and a smooth periodic spline approximation is
returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
quiet : int, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
u : array
An array of the values of the parameter.
fp : float
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splrep success. Success is indicated
if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splrep, splev, sproot, spalde, splint,
bisplrep, bisplev
UnivariateSpline, BivariateSpline
Notes
-----
See `splev` for evaluation of the spline and its derivatives.
The number of dimensions N must be smaller than 11.
References
----------
.. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines, Computer Graphics and Image Processing",
20 (1982) 171-184.
.. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines", report tw55, Dept. Computer Science,
K.U.Leuven, 1981.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
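    Examples
    --------
    A minimal, illustrative sketch (the half-circle data and all variable
    names here are arbitrary):
    >>> import numpy as np
    >>> from scipy.interpolate import splprep, splev
    >>> phi = np.linspace(0, np.pi, 30)
    >>> x = np.cos(phi)
    >>> y = np.sin(phi)
    >>> tck, u = splprep([x, y], s=0)
    >>> xi, yi = splev(np.linspace(0, 1, 200), tck)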
"""
if task <= 0:
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc), 'u': array([], float),
'ub': 0, 'ue': 1}
x = atleast_1d(x)
idim, m = x.shape
if per:
for i in range(idim):
if x[i][0] != x[i][-1]:
if quiet < 2:
warnings.warn(RuntimeWarning('Setting x[%d][%d]=x[%d][0]' %
(i, m, i)))
x[i][-1] = x[i][0]
if not 0 < idim < 11:
raise TypeError('0 < idim < 11 must hold')
if w is None:
w = ones(m, float)
else:
w = atleast_1d(w)
ipar = (u is not None)
if ipar:
_parcur_cache['u'] = u
if ub is None:
_parcur_cache['ub'] = u[0]
else:
_parcur_cache['ub'] = ub
if ue is None:
_parcur_cache['ue'] = u[-1]
else:
_parcur_cache['ue'] = ue
else:
_parcur_cache['u'] = zeros(m, float)
if not (1 <= k <= 5):
        raise TypeError('1 <= k = %d <= 5 must hold' % k)
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if (not len(w) == m) or (ipar == 1 and (not len(u) == m)):
raise TypeError('Mismatch of input dimensions')
if s is None:
s = m - sqrt(2*m)
if t is None and task == -1:
raise TypeError('Knots must be given for task=-1')
if t is not None:
_parcur_cache['t'] = atleast_1d(t)
n = len(_parcur_cache['t'])
if task == -1 and n < 2*k + 2:
raise TypeError('There must be at least 2*k+2 knots for task=-1')
if m <= k:
raise TypeError('m > k must hold')
if nest is None:
nest = m + 2*k
if (task >= 0 and s == 0) or (nest < 0):
if per:
nest = m + 2*k
else:
nest = m + k + 1
nest = max(nest, 2*k + 3)
u = _parcur_cache['u']
ub = _parcur_cache['ub']
ue = _parcur_cache['ue']
t = _parcur_cache['t']
wrk = _parcur_cache['wrk']
iwrk = _parcur_cache['iwrk']
t, c, o = _fitpack._parcur(ravel(transpose(x)), w, u, ub, ue, k,
task, ipar, s, t, nest, wrk, iwrk, per)
_parcur_cache['u'] = o['u']
_parcur_cache['ub'] = o['ub']
_parcur_cache['ue'] = o['ue']
_parcur_cache['t'] = t
_parcur_cache['wrk'] = o['wrk']
_parcur_cache['iwrk'] = o['iwrk']
ier = o['ier']
fp = o['fp']
n = len(t)
u = o['u']
c.shape = idim, n - k - 1
tcku = [t, list(c), k], u
if ier <= 0 and not quiet:
warnings.warn(RuntimeWarning(_iermess[ier][0] +
"\tk=%d n=%d m=%d fp=%f s=%f" %
(k, len(t), m, fp, s)))
if ier > 0 and not full_output:
if ier in [1, 2, 3]:
warnings.warn(RuntimeWarning(_iermess[ier][0]))
else:
try:
raise _iermess[ier][1](_iermess[ier][0])
except KeyError:
raise _iermess['unknown'][1](_iermess['unknown'][0])
if full_output:
try:
return tcku, fp, ier, _iermess[ier][0]
except KeyError:
return tcku, fp, ier, _iermess['unknown'][0]
else:
return tcku
_curfit_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc)}
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
full_output=0, per=0, quiet=1):
"""
Find the B-spline representation of 1-D curve.
Given the set of data points ``(x[i], y[i])`` determine a smooth spline
approximation of degree k on the interval ``xb <= x <= xe``.
Parameters
----------
x, y : array_like
The data points defining a curve y = f(x).
w : array_like, optional
Strictly positive rank-1 array of weights the same length as x and y.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the y values have standard-deviation given by the
vector d, then w should be 1/d. Default is ones(len(x)).
xb, xe : float, optional
The interval to fit. If None, these default to x[0] and x[-1]
respectively.
k : int, optional
        The degree of the spline fit. It is recommended to use cubic splines.
        Even degree splines should be avoided especially with small s values.
1 <= k <= 5
task : {1, 0, -1}, optional
If task==0 find t and c for a given smoothing factor, s.
If task==1 find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1 for the same
set of data (t will be stored an used internally)
If task=-1 find the weighted least square spline for a given set of
knots, t. These should be interior knots as knots on the ends will be
added automatically.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x)
is the smoothed interpolation of (x,y). The user can use s to control
the tradeoff between closeness and smoothness of fit. Larger s means
more smoothing while smaller values of s indicate less smoothing.
Recommended values of s depend on the weights, w. If the weights
represent the inverse of the standard-deviation of y, then a good s
value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if
weights are supplied. s = 0.0 (interpolating) if no weights are
supplied.
t : array_like, optional
The knots needed for task=-1. If given then task is automatically set
to -1.
full_output : bool, optional
If non-zero, then return optional outputs.
per : bool, optional
If non-zero, data points are considered periodic with period x[m-1] -
x[0] and a smooth periodic spline approximation is returned. Values of
y[m-1] and w[m-1] are not used.
quiet : bool, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
(t,c,k) a tuple containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
fp : array, optional
The weighted sum of squared residuals of the spline approximation.
ier : int, optional
An integer flag about splrep success. Success is indicated if ier<=0.
If ier in [1,2,3] an error occurred but was not raised. Otherwise an
error is raised.
msg : str, optional
A message corresponding to the integer flag, ier.
    See Also
    --------
    UnivariateSpline, BivariateSpline
    splprep, splev, sproot, spalde, splint
    bisplrep, bisplev
    Notes
    -----
    See `splev` for evaluation of the spline and its derivatives. Uses the
    FORTRAN routine curfit from FITPACK.
    The user is responsible for assuring that the values of `x` are unique.
    Otherwise, `splrep` will not return sensible results.
    If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
    i.e., there must be a subset of data points ``x[j]`` such that
    ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
References
----------
Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
.. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
integration of experimental data using spline functions",
J.Comp.Appl.Maths 1 (1975) 165-184.
.. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
1286-1304.
.. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
.. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import splev, splrep
>>> x = np.linspace(0, 10, 10)
>>> y = np.sin(x)
>>> tck = splrep(x, y)
>>> x2 = np.linspace(0, 10, 200)
>>> y2 = splev(x2, tck)
>>> plt.plot(x, y, 'o', x2, y2)
>>> plt.show()
"""
if task <= 0:
_curfit_cache = {}
x, y = map(atleast_1d, [x, y])
m = len(x)
if w is None:
w = ones(m, float)
if s is None:
s = 0.0
else:
w = atleast_1d(w)
if s is None:
s = m - sqrt(2*m)
if not len(w) == m:
raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
if (m != len(y)) or (m != len(w)):
raise TypeError('Lengths of the first three arguments (x,y,w) must '
'be equal')
if not (1 <= k <= 5):
raise TypeError('Given degree of the spline (k=%d) is not supported. '
'(1<=k<=5)' % k)
if m <= k:
raise TypeError('m > k must hold')
if xb is None:
xb = x[0]
if xe is None:
xe = x[-1]
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if t is not None:
task = -1
if task == -1:
if t is None:
raise TypeError('Knots must be given for task=-1')
numknots = len(t)
_curfit_cache['t'] = empty((numknots + 2*k + 2,), float)
_curfit_cache['t'][k+1:-k-1] = t
nest = len(_curfit_cache['t'])
elif task == 0:
if per:
nest = max(m + 2*k, 2*k + 3)
else:
nest = max(m + k + 1, 2*k + 3)
t = empty((nest,), float)
_curfit_cache['t'] = t
if task <= 0:
if per:
_curfit_cache['wrk'] = empty((m*(k + 1) + nest*(8 + 5*k),), float)
else:
_curfit_cache['wrk'] = empty((m*(k + 1) + nest*(7 + 3*k),), float)
_curfit_cache['iwrk'] = empty((nest,), intc)
try:
t = _curfit_cache['t']
wrk = _curfit_cache['wrk']
iwrk = _curfit_cache['iwrk']
except KeyError:
raise TypeError("must call with task=1 only after"
" call with task=0,-1")
if not per:
n, c, fp, ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk,
xb, xe, k, s)
else:
n, c, fp, ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s)
tck = (t[:n], c[:n], k)
if ier <= 0 and not quiet:
_mess = (_iermess[ier][0] + "\tk=%d n=%d m=%d fp=%f s=%f" %
(k, len(t), m, fp, s))
warnings.warn(RuntimeWarning(_mess))
if ier > 0 and not full_output:
if ier in [1, 2, 3]:
            warnings.warn(RuntimeWarning(_iermess[ier][0]))
else:
try:
raise _iermess[ier][1](_iermess[ier][0])
except KeyError:
raise _iermess['unknown'][1](_iermess['unknown'][0])
if full_output:
try:
return tck, fp, ier, _iermess[ier][0]
except KeyError:
return tck, fp, ier, _iermess['unknown'][0]
else:
return tck
def splev(x, tck, der=0, ext=0):
"""
Evaluate a B-spline or its derivatives.
Given the knots and coefficients of a B-spline representation, evaluate
the value of the smoothing polynomial and its derivatives. This is a
wrapper around the FORTRAN routines splev and splder of FITPACK.
Parameters
----------
x : array_like
An array of points at which to return the value of the smoothed
spline or its derivatives. If `tck` was returned from `splprep`,
then the parameter values, u should be given.
tck : tuple
A sequence of length 3 returned by `splrep` or `splprep` containing
the knots, coefficients, and degree of the spline.
der : int, optional
The order of derivative of the spline to compute (must be less than
or equal to k).
ext : int, optional
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0, return the extrapolated value.
* if ext=1, return 0
* if ext=2, raise a ValueError
* if ext=3, return the boundary value.
The default value is 0.
Returns
-------
y : ndarray or list of ndarrays
An array of values representing the spline function evaluated at
the points in ``x``. If `tck` was returned from `splprep`, then this
is a list of arrays representing the curve in N-dimensional space.
See Also
--------
splprep, splrep, sproot, spalde, splint
bisplrep, bisplev
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
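    Examples
    --------
    An illustrative sketch (data and names are arbitrary); ``der=1`` returns
    the first derivative:
    >>> import numpy as np
    >>> from scipy.interpolate import splrep, splev
    >>> x = np.linspace(0, 10, 30)
    >>> y = np.cos(x)
    >>> tck = splrep(x, y)
    >>> x2 = np.linspace(0, 10, 200)
    >>> y2 = splev(x2, tck)            # spline values
    >>> dy2 = splev(x2, tck, der=1)    # first derivative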
"""
t, c, k = tck
try:
c[0][0]
parametric = True
    except Exception:
parametric = False
if parametric:
return list(map(lambda c, x=x, t=t, k=k, der=der:
splev(x, [t, c, k], der, ext), c))
else:
if not (0 <= der <= k):
raise ValueError("0<=der=%d<=k=%d must hold" % (der, k))
if ext not in (0, 1, 2, 3):
raise ValueError("ext = %s not in (0, 1, 2, 3) " % ext)
x = asarray(x)
shape = x.shape
x = atleast_1d(x).ravel()
y, ier = _fitpack._spl_(x, der, t, c, k, ext)
if ier == 10:
raise ValueError("Invalid input data")
if ier == 1:
raise ValueError("Found x value not in the domain")
if ier:
raise TypeError("An error occurred")
return y.reshape(shape)
def splint(a, b, tck, full_output=0):
"""
Evaluate the definite integral of a B-spline.
Given the knots and coefficients of a B-spline, evaluate the definite
integral of the smoothing polynomial between two given points.
Parameters
----------
a, b : float
The end-points of the integration interval.
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline (see `splev`).
full_output : int, optional
Non-zero to return optional output.
Returns
-------
integral : float
The resulting integral.
wrk : ndarray
An array containing the integrals of the normalized B-splines
defined on the set of knots.
Notes
-----
splint silently assumes that the spline function is zero outside the data
interval (a, b).
See Also
--------
splprep, splrep, sproot, spalde, splev
bisplrep, bisplev
UnivariateSpline, BivariateSpline
References
----------
    .. [1] P.W. Gaffney, "The calculation of indefinite integrals of b-splines",
J. Inst. Maths Applics, 17, p.37-41, 1976.
.. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
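    Examples
    --------
    A short illustrative sketch (data and names are arbitrary):
    >>> import numpy as np
    >>> from scipy.interpolate import splrep, splint
    >>> x = np.linspace(0, np.pi, 50)
    >>> y = np.sin(x)
    >>> tck = splrep(x, y)
    >>> integral = splint(0, np.pi, tck)   # close to 2.0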
"""
t, c, k = tck
try:
c[0][0]
parametric = True
    except Exception:
parametric = False
if parametric:
return list(map(lambda c, a=a, b=b, t=t, k=k:
splint(a, b, [t, c, k]), c))
else:
aint, wrk = _fitpack._splint(t, c, k, a, b)
if full_output:
return aint, wrk
else:
return aint
def sproot(tck, mest=10):
"""
Find the roots of a cubic B-spline.
Given the knots (>=8) and coefficients of a cubic B-spline return the
roots of the spline.
Parameters
----------
tck : tuple
A tuple (t,c,k) containing the vector of knots,
the B-spline coefficients, and the degree of the spline.
The number of knots must be >= 8, and the degree must be 3.
        The knots must be a monotonically increasing sequence.
mest : int, optional
An estimate of the number of zeros (Default is 10).
Returns
-------
zeros : ndarray
An array giving the roots of the spline.
See also
--------
splprep, splrep, splint, spalde, splev
bisplrep, bisplev
UnivariateSpline, BivariateSpline
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
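    Examples
    --------
    An illustrative sketch (data and names are arbitrary); the fitted sine
    has roots near multiples of pi:
    >>> import numpy as np
    >>> from scipy.interpolate import splrep, sproot
    >>> x = np.linspace(0, 2 * np.pi, 50)
    >>> y = np.sin(x)
    >>> tck = splrep(x, y, k=3)
    >>> roots = sproot(tck)   # roots near 0, pi and 2*pi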
"""
t, c, k = tck
if k != 3:
raise ValueError("sproot works only for cubic (k=3) splines")
try:
c[0][0]
parametric = True
    except Exception:
parametric = False
if parametric:
return list(map(lambda c, t=t, k=k, mest=mest:
sproot([t, c, k], mest), c))
else:
if len(t) < 8:
raise TypeError("The number of knots %d>=8" % len(t))
z, ier = _fitpack._sproot(t, c, k, mest)
if ier == 10:
raise TypeError("Invalid input data. "
"t1<=..<=t4<t5<..<tn-3<=..<=tn must hold.")
if ier == 0:
return z
if ier == 1:
warnings.warn(RuntimeWarning("The number of zeros exceeds mest"))
return z
raise TypeError("Unknown error")
def spalde(x, tck):
"""
Evaluate all derivatives of a B-spline.
Given the knots and coefficients of a cubic B-spline compute all
derivatives up to order k at a point (or set of points).
Parameters
----------
x : array_like
A point or a set of points at which to evaluate the derivatives.
Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
tck : tuple
A tuple (t,c,k) containing the vector of knots,
the B-spline coefficients, and the degree of the spline.
Returns
-------
results : {ndarray, list of ndarrays}
An array (or a list of arrays) containing all derivatives
up to order k inclusive for each point `x`.
See Also
--------
splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
UnivariateSpline, BivariateSpline
References
----------
.. [1] de Boor C : On calculating with b-splines, J. Approximation Theory
6 (1972) 50-62.
.. [2] Cox M.G. : The numerical evaluation of b-splines, J. Inst. Maths
applics 10 (1972) 134-149.
.. [3] Dierckx P. : Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
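    Examples
    --------
    A minimal illustrative sketch (data and names are arbitrary):
    >>> import numpy as np
    >>> from scipy.interpolate import splrep, spalde
    >>> x = np.linspace(0, 10, 30)
    >>> y = np.sin(x)
    >>> tck = splrep(x, y)
    >>> derivs = spalde(5.0, tck)   # value and derivatives up to order k at x=5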
"""
t, c, k = tck
try:
c[0][0]
parametric = True
    except Exception:
parametric = False
if parametric:
return list(map(lambda c, x=x, t=t, k=k:
spalde(x, [t, c, k]), c))
else:
x = atleast_1d(x)
if len(x) > 1:
return list(map(lambda x, tck=tck: spalde(x, tck), x))
d, ier = _fitpack._spalde(t, c, k, x[0])
if ier == 0:
return d
if ier == 10:
raise TypeError("Invalid input data. t(k)<=x<=t(n-k+1) must hold.")
raise TypeError("Unknown error")
# def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None,
# full_output=0,nest=None,per=0,quiet=1):
_surfit_cache = {'tx': array([], float), 'ty': array([], float),
'wrk': array([], float), 'iwrk': array([], intc)}
def bisplrep(x, y, z, w=None, xb=None, xe=None, yb=None, ye=None,
kx=3, ky=3, task=0, s=None, eps=1e-16, tx=None, ty=None,
full_output=0, nxest=None, nyest=None, quiet=1):
"""
Find a bivariate B-spline representation of a surface.
Given a set of data points (x[i], y[i], z[i]) representing a surface
z=f(x,y), compute a B-spline representation of the surface. Based on
the routine SURFIT from FITPACK.
Parameters
----------
x, y, z : ndarray
Rank-1 arrays of data points.
w : ndarray, optional
Rank-1 array of weights. By default ``w=np.ones(len(x))``.
xb, xe : float, optional
End points of approximation interval in `x`.
By default ``xb = x.min(), xe=x.max()``.
yb, ye : float, optional
End points of approximation interval in `y`.
By default ``yb=y.min(), ye = y.max()``.
kx, ky : int, optional
The degrees of the spline (1 <= kx, ky <= 5).
Third order (kx=ky=3) is recommended.
task : int, optional
If task=0, find knots in x and y and coefficients for a given
smoothing factor, s.
If task=1, find knots and coefficients for another value of the
smoothing factor, s. bisplrep must have been previously called
with task=0 or task=1.
If task=-1, find coefficients for a given set of knots tx, ty.
s : float, optional
A non-negative smoothing factor. If weights correspond
to the inverse of the standard-deviation of the errors in z,
then a good s-value should be found in the range
``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x).
eps : float, optional
A threshold for determining the effective rank of an
over-determined linear system of equations (0 < eps < 1).
`eps` is not likely to need changing.
tx, ty : ndarray, optional
Rank-1 arrays of the knots of the spline for task=-1
full_output : int, optional
Non-zero to return optional outputs.
nxest, nyest : int, optional
Over-estimates of the total number of knots. If None then
``nxest = max(kx+sqrt(m/2),2*kx+3)``,
``nyest = max(ky+sqrt(m/2),2*ky+3)``.
quiet : int, optional
Non-zero to suppress printing of messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : array_like
A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and
coefficients (c) of the bivariate B-spline representation of the
surface along with the degree of the spline.
fp : ndarray
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splrep success. Success is indicated if
ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splprep, splrep, splint, sproot, splev
UnivariateSpline, BivariateSpline
Notes
-----
See `bisplev` to evaluate the value of the B-spline given its tck
representation.
References
----------
.. [1] Dierckx P.:An algorithm for surface fitting with spline functions
Ima J. Numer. Anal. 1 (1981) 267-283.
.. [2] Dierckx P.:An algorithm for surface fitting with spline functions
report tw50, Dept. Computer Science,K.U.Leuven, 1980.
.. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
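    Examples
    --------
    An illustrative sketch on a small gridded data set (the test surface and
    all names are arbitrary):
    >>> import numpy as np
    >>> from scipy.interpolate import bisplrep
    >>> x, y = np.mgrid[-1:1:20j, -1:1:20j]
    >>> z = (x + y) * np.exp(-6.0 * (x * x + y * y))
    >>> tck = bisplrep(x, y, z, s=0)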
"""
x, y, z = map(ravel, [x, y, z]) # ensure 1-d arrays.
m = len(x)
if not (m == len(y) == len(z)):
raise TypeError('len(x)==len(y)==len(z) must hold.')
if w is None:
w = ones(m, float)
else:
w = atleast_1d(w)
if not len(w) == m:
raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
if xb is None:
xb = x.min()
if xe is None:
xe = x.max()
if yb is None:
yb = y.min()
if ye is None:
ye = y.max()
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if s is None:
s = m - sqrt(2*m)
if tx is None and task == -1:
raise TypeError('Knots_x must be given for task=-1')
if tx is not None:
_surfit_cache['tx'] = atleast_1d(tx)
nx = len(_surfit_cache['tx'])
if ty is None and task == -1:
        raise TypeError('Knots_y must be given for task=-1')
if ty is not None:
_surfit_cache['ty'] = atleast_1d(ty)
ny = len(_surfit_cache['ty'])
if task == -1 and nx < 2*kx+2:
raise TypeError('There must be at least 2*kx+2 knots_x for task=-1')
if task == -1 and ny < 2*ky+2:
        raise TypeError('There must be at least 2*ky+2 knots_y for task=-1')
if not ((1 <= kx <= 5) and (1 <= ky <= 5)):
raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not '
'supported. (1<=k<=5)' % (kx, ky))
if m < (kx + 1)*(ky + 1):
raise TypeError('m >= (kx+1)(ky+1) must hold')
if nxest is None:
nxest = int(kx + sqrt(m/2))
if nyest is None:
nyest = int(ky + sqrt(m/2))
nxest, nyest = max(nxest, 2*kx + 3), max(nyest, 2*ky + 3)
if task >= 0 and s == 0:
nxest = int(kx + sqrt(3*m))
nyest = int(ky + sqrt(3*m))
if task == -1:
_surfit_cache['tx'] = atleast_1d(tx)
_surfit_cache['ty'] = atleast_1d(ty)
tx, ty = _surfit_cache['tx'], _surfit_cache['ty']
wrk = _surfit_cache['wrk']
u = nxest - kx - 1
v = nyest - ky - 1
km = max(kx, ky) + 1
ne = max(nxest, nyest)
bx, by = kx*v + ky + 1, ky*u + kx + 1
b1, b2 = bx, bx + v - ky
if bx > by:
b1, b2 = by, by + u - kx
msg = "Too many data points to interpolate"
lwrk1 = _intc_overflow(u*v*(2 + b1 + b2) +
2*(u + v + km*(m + ne) + ne - kx - ky) + b2 + 1,
msg=msg)
lwrk2 = _intc_overflow(u*v*(b2 + 1) + b2, msg=msg)
tx, ty, c, o = _fitpack._surfit(x, y, z, w, xb, xe, yb, ye, kx, ky,
task, s, eps, tx, ty, nxest, nyest,
wrk, lwrk1, lwrk2)
_curfit_cache['tx'] = tx
_curfit_cache['ty'] = ty
_curfit_cache['wrk'] = o['wrk']
ier, fp = o['ier'], o['fp']
tck = [tx, ty, c, kx, ky]
ierm = min(11, max(-3, ier))
if ierm <= 0 and not quiet:
_mess = (_iermess2[ierm][0] +
"\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
(kx, ky, len(tx), len(ty), m, fp, s))
warnings.warn(RuntimeWarning(_mess))
if ierm > 0 and not full_output:
if ier in [1, 2, 3, 4, 5]:
_mess = ("\n\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
(kx, ky, len(tx), len(ty), m, fp, s))
warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess))
else:
try:
raise _iermess2[ierm][1](_iermess2[ierm][0])
except KeyError:
raise _iermess2['unknown'][1](_iermess2['unknown'][0])
if full_output:
try:
return tck, fp, ier, _iermess2[ierm][0]
except KeyError:
return tck, fp, ier, _iermess2['unknown'][0]
else:
return tck
def bisplev(x, y, tck, dx=0, dy=0):
"""
Evaluate a bivariate B-spline and its derivatives.
Return a rank-2 array of spline function values (or spline derivative
values) at points given by the cross-product of the rank-1 arrays `x` and
`y`. In special cases, return an array or just a float if either `x` or
`y` or both are floats. Based on BISPEV from FITPACK.
Parameters
----------
x, y : ndarray
Rank-1 arrays specifying the domain over which to evaluate the
spline or its derivative.
tck : tuple
A sequence of length 5 returned by `bisplrep` containing the knot
locations, the coefficients, and the degree of the spline:
[tx, ty, c, kx, ky].
dx, dy : int, optional
The orders of the partial derivatives in `x` and `y` respectively.
Returns
-------
vals : ndarray
The B-spline or its derivative evaluated over the set formed by
the cross-product of `x` and `y`.
See Also
--------
splprep, splrep, splint, sproot, splev
UnivariateSpline, BivariateSpline
Notes
-----
See `bisplrep` to generate the `tck` representation.
References
----------
.. [1] Dierckx P. : An algorithm for surface fitting
with spline functions
Ima J. Numer. Anal. 1 (1981) 267-283.
.. [2] Dierckx P. : An algorithm for surface fitting
with spline functions
report tw50, Dept. Computer Science,K.U.Leuven, 1980.
.. [3] Dierckx P. : Curve and surface fitting with splines,
Monographs on Numerical Analysis, Oxford University Press, 1993.
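    Examples
    --------
    An illustrative sketch continuing from a `bisplrep` fit (data and names
    are arbitrary):
    >>> import numpy as np
    >>> from scipy.interpolate import bisplrep, bisplev
    >>> x, y = np.mgrid[-1:1:20j, -1:1:20j]
    >>> z = (x + y) * np.exp(-6.0 * (x * x + y * y))
    >>> tck = bisplrep(x, y, z, s=0)
    >>> xnew = np.linspace(-1, 1, 50)
    >>> ynew = np.linspace(-1, 1, 50)
    >>> znew = bisplev(xnew, ynew, tck)   # array of shape (50, 50)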
"""
tx, ty, c, kx, ky = tck
if not (0 <= dx < kx):
raise ValueError("0 <= dx = %d < kx = %d must hold" % (dx, kx))
if not (0 <= dy < ky):
raise ValueError("0 <= dy = %d < ky = %d must hold" % (dy, ky))
x, y = map(atleast_1d, [x, y])
if (len(x.shape) != 1) or (len(y.shape) != 1):
raise ValueError("First two entries should be rank-1 arrays.")
z, ier = _fitpack._bispev(tx, ty, c, kx, ky, x, y, dx, dy)
if ier == 10:
raise ValueError("Invalid input data")
if ier:
raise TypeError("An error occurred")
z.shape = len(x), len(y)
if len(z) > 1:
return z
if len(z[0]) > 1:
return z[0]
return z[0][0]
def dblint(xa, xb, ya, yb, tck):
"""Evaluate the integral of a spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
tck : list [tx, ty, c, kx, ky]
A sequence of length 5 returned by bisplrep containing the knot
locations tx, ty, the coefficients c, and the degrees kx, ky
of the spline.
Returns
-------
integ : float
The value of the resulting integral.
"""
tx, ty, c, kx, ky = tck
return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
def insert(x, tck, m=1, per=0):
"""
Insert knots into a B-spline.
Given the knots and coefficients of a B-spline representation, create a
new B-spline with a knot inserted `m` times at point `x`.
This is a wrapper around the FORTRAN routine insert of FITPACK.
Parameters
----------
x (u) : array_like
A 1-D point at which to insert a new knot(s). If `tck` was returned
from ``splprep``, then the parameter values, u should be given.
tck : tuple
A tuple (t,c,k) returned by ``splrep`` or ``splprep`` containing
the vector of knots, the B-spline coefficients,
and the degree of the spline.
m : int, optional
The number of times to insert the given knot (its multiplicity).
Default is 1.
per : int, optional
If non-zero, the input spline is considered periodic.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the new spline.
``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
In case of a periodic spline (``per != 0``) there must be
either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
Notes
-----
Based on algorithms from [1]_ and [2]_.
References
----------
.. [1] W. Boehm, "Inserting new knots into b-spline curves.",
Computer Aided Design, 12, p.199-201, 1980.
.. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
Numerical Analysis", Oxford University Press, 1993.
"""
t, c, k = tck
try:
c[0][0]
parametric = True
    except Exception:
parametric = False
if parametric:
cc = []
for c_vals in c:
tt, cc_val, kk = insert(x, [t, c_vals, k], m)
cc.append(cc_val)
return (tt, cc, kk)
else:
tt, cc, ier = _fitpack._insert(per, t, c, k, x, m)
if ier == 10:
raise ValueError("Invalid input data")
if ier:
raise TypeError("An error occurred")
return (tt, cc, k)
def splder(tck, n=1):
"""
Compute the spline representation of the derivative of a given spline
Parameters
----------
tck : tuple of (t, c, k)
Spline whose derivative to compute
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
tck_der : tuple of (t2, c2, k2)
Spline of order k2=k-n representing the derivative
of the input spline.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, splev, spalde
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import splrep, splder, sproot
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = splrep(x, y, k=4)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> dspl = splder(spl)
>>> sproot(dspl) / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\pi/2 + n\pi` of
:math:`\cos(x) = \sin'(x)`.
"""
if n < 0:
return splantider(tck, -n)
t, c, k = tck
if n > k:
raise ValueError(("Order of derivative (n = %r) must be <= "
"order of spline (k = %r)") % (n, tck[2]))
with np.errstate(invalid='raise', divide='raise'):
try:
for j in range(n):
# See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5
# Compute the denominator in the differentiation formula.
dt = t[k+1:-1] - t[1:-k-1]
# Compute the new coefficients
c = (c[1:-1-k] - c[:-2-k]) * k / dt
# Pad coefficient array to same size as knots (FITPACK
# convention)
c = np.r_[c, [0]*k]
# Adjust knots
t = t[1:-1]
k -= 1
except FloatingPointError:
raise ValueError(("The spline has internal repeated knots "
"and is not differentiable %d times") % n)
return t, c, k
def splantider(tck, n=1):
"""
Compute the spline for the antiderivative (integral) of a given spline.
Parameters
----------
tck : tuple of (t, c, k)
Spline whose antiderivative to compute
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
tck_ader : tuple of (t2, c2, k2)
Spline of order k2=k+n representing the antiderivative of the input
spline.
See Also
--------
splder, splev, spalde
Notes
-----
The `splder` function is the inverse operation of this function.
Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
rounding error.
.. versionadded:: 0.13.0
Examples
--------
>>> from scipy.interpolate import splrep, splder, splantider, splev
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = splrep(x, y)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = splantider(spl)
>>> splev(np.pi/2, ispl) - splev(0, ispl)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
if n < 0:
return splder(tck, -n)
t, c, k = tck
for j in range(n):
# This is the inverse set of operations to splder.
# Compute the multiplier in the antiderivative formula.
dt = t[k+1:] - t[:-k-1]
# Compute the new coefficients
c = np.cumsum(c[:-k-1] * dt) / (k + 1)
c = np.r_[0, c, [c[-1]]*(k+2)]
# New knots
t = np.r_[t[0], t, t[-1]]
k += 1
return t, c, k
| bsd-3-clause |
aborgher/OW-stats-plot | OW_flask.py | 1 | 9956 | from plotly.offline import download_plotlyjs, init_notebook_mode, iplot, plot
from plotly.graph_objs import *
#init_notebook_mode()
import matplotlib.pyplot as plt
import numpy as np
from lxml import etree
from lxml.html import fromstring, tostring
import urllib.request
def create_allinfos(tree,category = 'competitive'): # 'quick play'
allheroes = {}
# xpath_onehero = '//*[@id="stats-section"]/div/div/div[@data-category-id="{0}"]/div/div/table[@class="data-table"]/tbody/tr/td'
xpath_onehero = '//*[@id="{}"]/section[@class="content-box u-max-width-container career-stats-section"]'
xpath_onehero = xpath_onehero + '/div/div[@data-category-id="{}"]/div/div/table[@class="data-table"]/tbody/tr/td'
# [@class="data-table"] is not really needed
    # each hero has a hexadecimal identifier used in the data-category-id field of the xpath;
# "All Heroes" are treated as a single hero
heroes_exa = {
"All Heroes":"0x02E00000FFFFFFFF",
"Reaper":"0x02E0000000000002",
"Tracer":"0x02E0000000000003",
"Mercy":"0x02E0000000000004",
"Hanzo":"0x02E0000000000005",
"Torbjorn":"0x02E0000000000006",
"Reinhardt":"0x02E0000000000007",
"Pharah":"0x02E0000000000008",
"Winston":"0x02E0000000000009",
"Widowmaker":"0x02E000000000000A",
"Bastion":"0x02E0000000000015",
"Symmetra":"0x02E0000000000016",
"Zenyatta":"0x02E0000000000020",
"Genji":"0x02E0000000000029",
"Roadhog":"0x02E0000000000040",
"McCree":"0x02E0000000000042",
"Junkrat":"0x02E0000000000065",
"Zarya":"0x02E0000000000068",
"Soldier: 76":"0x02E000000000006E",
"Lucio":"0x02E0000000000079",
"D.Va":"0x02E000000000007A",
"Mei":"0x02E00000000000DD",
"Ana":"0x02E000000000013B",
"Sombra":"0x02E000000000012E"
}
for hero in heroes_exa.keys():
temp = tree.xpath(xpath_onehero.format(category,heroes_exa[hero]))
tempdic = {}
for i in range(0,len(temp),2):
tempdic[temp[i].text] = temp[i+1].text
allheroes[hero] = tempdic
return allheroes
def getPlayerInfos(url,category='competitive'):
print("obtaining {0} infos from ".format(category) + url)
request = urllib.request.Request(url)
rawPage = urllib.request.urlopen(request)
read = rawPage.read()
tree = etree.HTML(read)
allheroes = create_allinfos(tree,category)
return allheroes
def getAllHeroInfoName(HeroesStats, hero):
temp = []
for p in HeroesStats.keys():
temp = temp + list(HeroesStats[p][hero].keys())
temp = list(set(temp))
return temp
def myConvToFloat(mystr):
tobereplace = ["%",","," minutes"," minute"," hours"," hour"," seconds"," second",":"]
for r in tobereplace:
mystr = mystr.replace(r,"")
return float(mystr)
def plotHeroForAllPlayersPlotly(hero,HeroesStats,FairCompare=True):
units = ""
allvars = getAllHeroInfoName(HeroesStats,hero)
allvars.sort()
data = []
layout= Layout()
for p in HeroesStats.keys():
xx, yy, my_xticks = [], [], []
pp = p[0:p.find("-")]
for i,var in enumerate(allvars):
if FairCompare:
if "Average" not in var:
continue
xx.append(i)
if not var in HeroesStats[p][hero]:
yy.append(0)
else:
yy.append(myConvToFloat(HeroesStats[p][hero][var]))
if("%" in HeroesStats[p][hero][var]): units = " (%)"
if("hour" in HeroesStats[p][hero][var]): units = " h"
if("hours" in HeroesStats[p][hero][var]): units = " h"
if("minute" in HeroesStats[p][hero][var]): units = " min"
if("minutes" in HeroesStats[p][hero][var]): units = " min"
if("seconds" in HeroesStats[p][hero][var]): units = " sec"
if("second" in HeroesStats[p][hero][var]): units = " sec"
else: units = ""
my_xticks.append(var+units)
xlabelsize = 8
if FairCompare:
xlabelsize = 10
layout = Layout(
title = hero,
xaxis=dict(
title='',
titlefont=dict(family='Arial, sans-serif',size=18,color='lightgrey'),
showticklabels=True,
tickangle=35,
tickfont=dict(family='Old Standard TT, serif',size=xlabelsize,color='black'),
),
yaxis=dict(
title='stats',
titlefont=dict(family='Arial, sans-serif',size=18,color='lightgrey'),
showticklabels=True,
tickangle=0,
tickfont=dict(family='Old Standard TT, serif',size=18,color='black'),
type = 'log'
)
)
data.append(Scatter(x = my_xticks, y = yy, mode = 'markers',
marker=dict(symbol="line-ew-open",size=6,line=dict(width=4)), name = pp))
#data.append(Scatter(x = my_xticks, y = yy, mode = 'markers', name = pp))
fig = Figure(data=data, layout=layout)
plot(fig, filename=hero+'.html')
# Players = ['Ale-1244', 'Frenci-1486', 'Sam-1619', 'kiukki-2350', 'Alby-2701', "ziGno-2418"]
# heroes_name=["Reaper", "Tracer", "Mercy", "Hanzo", "Torbjorn", "Reinhardt", "Pharah",
# "Winston", "Widowmaker", "Bastion", "Symmetra", "Zenyatta", "Genji", "Roadhog",
# "McCree", "Junkrat", "Zarya", "Soldier: 76", "Lucio", "D.Va", "Mei", 'Ana', 'Sombra']
# heroes_name.sort()
# heroes_name_2 = ["All Heroes"] + heroes_name
# HeroesStatsQuickPlay = {}
# HeroesStatsCompetitive = {}
# for p in Players:
# HeroesStatsQuickPlay[p] = getPlayerInfos(prefix + p,'quickplay')
# HeroesStatsCompetitive[p] = getPlayerInfos(prefix + p,'competitive')
### start to code the flask app
from flask import Flask, render_template
from flask_socketio import SocketIO, emit
from flask import request
import json
import plotly
name = 'OW_stats_compare'
app = Flask(name)
app.debug = True
app.config['SECRET_KEY'] = 'secret'
socketio = SocketIO(app)
def get_players_stats(players):
# players = app_state['players'].split(",")
players = players.split(",")
HeroesStatsQuickPlay = {}
HeroesStatsCompetitive = {}
prefix = "https://playoverwatch.com/en-gb/career/pc/eu/"
for p in players:
p=p.strip()
try:
HeroesStatsQuickPlay[p] = getPlayerInfos(prefix + p,'quickplay')
HeroesStatsCompetitive[p] = getPlayerInfos(prefix + p,'competitive')
except urllib.error.HTTPError:
print("Players {0} not found".format(p))
return HeroesStatsQuickPlay, HeroesStatsCompetitive
# HeroesStats = {"quickplay":{}, "competitive":{}}
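# NOTE: HeroesStatsQuickPlay / HeroesStatsCompetitive are kept as module-level
# globals: the POST handler below fills them and the 'replot' socket handler
# reads them.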
@app.route('/', methods=['POST','GET'])
def index():
if request.method == 'POST':
players_choosen = request.form.get('players', None)
global HeroesStatsQuickPlay
global HeroesStatsCompetitive
HeroesStatsQuickPlay, HeroesStatsCompetitive = get_players_stats(players_choosen)
return render_template('layouts/layout_single_column_and_controls.html', app_name=name)
@socketio.on('replot')
def replot(app_state):
HeroesStats = {}
if app_state['mode'] == 'competitive':
HeroesStats = HeroesStatsCompetitive
elif app_state['mode'] == 'quickplay':
HeroesStats = HeroesStatsQuickPlay
hero = app_state['hero']
units = ""
allvars = getAllHeroInfoName(HeroesStats,hero)
allvars.sort()
data = []
traces = []
for ii,p in enumerate(HeroesStats.keys()):
pp = p[0:p.find("-")]
xx, yy, my_xticks = [], [], []
for i,var in enumerate(allvars):
xx.append(i)
if not var in HeroesStats[p][hero]:
yy.append(0)
else:
yy.append(myConvToFloat(HeroesStats[p][hero][var]))
if("%" in HeroesStats[p][hero][var]): units = " (%)"
if("hour" in HeroesStats[p][hero][var]): units = " h"
if("hours" in HeroesStats[p][hero][var]): units = " h"
if("minute" in HeroesStats[p][hero][var]): units = " min"
if("minutes" in HeroesStats[p][hero][var]): units = " min"
if("seconds" in HeroesStats[p][hero][var]): units = " sec"
if("second" in HeroesStats[p][hero][var]): units = " sec"
else: units = ""
my_xticks.append(var+units)
traces.append(Scatter({'x':my_xticks, 'y':yy, 'mode':'markers', 'name':pp,
'marker':{'symbol':"line-ew-open",'size':6,'line':{'width':4}}}
))
fig = {
'layout':{
'title':hero,
'xaxis':{
'title':'',
'titlefont':{'family':'Arial, sans-serif',
'size':18,'color':'lightgrey'},
'showticklabels':True,
'tickangle':35,
'tickfont':{'family':'Old Standard TT, serif',
'size':8,'color':'black'},
},
'yaxis':{
'title':'stats',
'titlefont':{'family':'Arial, sans-serif','size':18,'color':'lightgrey'},
'showticklabels':True,
'tickangle':0,
'tickfont':{'family':'Old Standard TT, serif','size':18,'color':'black'},
'type':'log'
}
},
'data': traces
}
messages = [{'id': 'Scatter',
'task': 'newPlot',
'data': fig['data'],
'layout': fig['layout'] }]
emit('postMessage', json.dumps(messages, cls=plotly.utils.PlotlyJSONEncoder))
if __name__ == '__main__':
socketio.run(app, host='127.0.0.1', port=5000)
| gpl-3.0 |
littleocub/python_practice | bj_tmp_matplotlib/beijing_2016.py | 1 | 1722 | # beijing_2016
import csv
import matplotlib.dates
from datetime import datetime
from matplotlib import pyplot as plt
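# NOTE: the two helpers below read from the module-level `data` list that is
# created in the `with open(...)` block further down.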
def date_to_list(data_index):
""" save date to a list """
results = []
for row in data:
results.append(datetime.strptime(row[data_index], '%Y-%m-%d'))
return results
def data_to_list(data_index):
""" save data to a list """
results = []
for row in data:
results.append(int(row[data_index]))
return results
filename = 'beijing_2016.csv'
with open(filename) as bj:
data = csv.reader(bj)
header = next(data)
# print(header)
# print(next(data))
# get the index of data needed
print('date_akdt', header.index('date_akdt'))
print('high_temp_f', header.index('high_temp_f'))
print('low_temp_f', header.index('low_temp_f'))
# create a list from the remaining contents in the iterable
data = list(data)
# save data to list
high_temp_f_bj = data_to_list(1)
high_temp_c_bj = [int((x-32)/1.8) for x in high_temp_f_bj]
low_temp_f_bj = data_to_list(3)
low_temp_c_bj = [int((x-32)/1.8) for x in low_temp_f_bj]
date = date_to_list(0)
plt.figure(figsize=(12, 5), dpi=100)
plt.plot(date, high_temp_c_bj, c='xkcd:orange')
plt.plot(date, low_temp_c_bj,c='xkcd:azure')
plt.title('Beijing Temperatures (High & Low) - Year 2016', fontsize=22)
plt.ylabel('Temperature (C)', fontsize=20)
plt.tick_params(axis='both', labelsize=16)
plt.fill_between(date, high_temp_c_bj, low_temp_c_bj, facecolor='xkcd:silver', alpha=0.2)
plt.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%Y-%m"))
plt.gcf().autofmt_xdate()
plt.margins(x=0,y=0.2)
plt.show() | mit |
lgarren/spack | var/spack/repos/builtin/packages/py-sncosmo/package.py | 3 | 2130 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PySncosmo(PythonPackage):
"""SNCosmo is a Python library for high-level supernova cosmology
analysis."""
homepage = "http://sncosmo.readthedocs.io/"
url = "https://pypi.io/packages/source/s/sncosmo/sncosmo-1.2.0.tar.gz"
version('1.2.0', '028e6d1dc84ab1c17d2f3b6378b2cb1e')
# Required dependencies
# py-sncosmo binaries are duplicates of those from py-astropy
extends('python', ignore=r'bin/.*')
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-astropy', type=('build', 'run'))
# Recommended dependencies
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-iminuit', type=('build', 'run'))
depends_on('py-emcee', type=('build', 'run'))
depends_on('py-nestle', type=('build', 'run'))
| lgpl-2.1 |
pravsripad/mne-python | tutorials/source-modeling/plot_beamformer_lcmv.py | 10 | 12809 | """
Source reconstruction using an LCMV beamformer
==============================================
This tutorial gives an overview of the beamformer method
and shows how to reconstruct source activity using an LCMV beamformer.
"""
# Authors: Britta Westner <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample, fetch_fsaverage
from mne.beamformer import make_lcmv, apply_lcmv
###############################################################################
# Introduction to beamformers
# ---------------------------
# A beamformer is a spatial filter that reconstructs source activity by
# scanning through a grid of pre-defined source points and estimating activity
# at each of those source points independently. A set of weights is
# constructed for each defined source location which defines the contribution
# of each sensor to this source.
# Beamformers are often used for their focal reconstructions and their ability
# to reconstruct deeper sources. They can also suppress external noise sources.
# The beamforming method applied in this tutorial is the linearly constrained
# minimum variance (LCMV) beamformer :footcite:`VanVeenEtAl1997` operates on
# time series.
# Frequency-resolved data can be reconstructed with the dynamic imaging of
# coherent sources (DICS) beamforming method :footcite:`GrossEtAl2001`.
# As we will see in the following, the spatial filter is computed from two
# ingredients: the forward model solution and the covariance matrix of the
# data.
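###############################################################################
# As a purely schematic illustration (not part of the MNE-Python code used
# below), the LCMV weights for a single source can be written as
# ``w = C^-1 L / (L^T C^-1 L)``, where ``L`` is the forward-model (lead field)
# column for that source and ``C`` is the data covariance. The toy numbers
# here are made up only to show the shape of the computation.
import numpy as np
L_toy = np.array([[0.2], [1.0], [0.5]])    # toy lead field: 3 sensors, 1 source
C_toy = np.eye(3) + 0.1 * np.ones((3, 3))  # toy data covariance (3 x 3)
C_inv = np.linalg.inv(C_toy)
w_toy = C_inv @ L_toy / (L_toy.T @ C_inv @ L_toy)  # LCMV filter weights, (3, 1)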
###############################################################################
# Data processing
# ---------------
# We will use the sample data set for this tutorial and reconstruct source
# activity on the trials with left auditory stimulation.
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Read the raw data
raw = mne.io.read_raw_fif(raw_fname)
raw.info['bads'] = ['MEG 2443'] # bad MEG channel
# Set up the epoching
event_id = 1 # those are the trials with left-ear auditory stimuli
tmin, tmax = -0.2, 0.5
events = mne.find_events(raw)
# pick relevant channels
raw.pick(['meg', 'eog']) # pick channels of interest
# Create epochs
proj = False # already applied
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
baseline=(None, 0), preload=True, proj=proj,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
# for speed purposes, cut to a window of interest
evoked = epochs.average().crop(0.05, 0.15)
# Visualize averaged sensor space data
evoked.plot_joint()
del raw # save memory
###############################################################################
# Computing the covariance matrices
# ---------------------------------
# Spatial filters use the data covariance to estimate the filter
# weights. The data covariance matrix will be `inverted`_ during the spatial
# filter computation, so it is valuable to plot the covariance matrix and its
# eigenvalues to gauge whether matrix inversion will be possible.
# Also, because we want to combine different channel types (magnetometers and
# gradiometers), we need to account for the different amplitude scales of these
# channel types. To do this we will supply a noise covariance matrix to the
# beamformer, which will be used for whitening.
# The data covariance matrix should be estimated from a time window that
# includes the brain signal of interest,
# and incorporate enough samples for a stable estimate. A rule of thumb is to
# use more samples than there are channels in the data set; see
# :footcite:`BrookesEtAl2008` for more detailed advice on covariance estimation
# for beamformers. Here, we use a time
# window incorporating the expected auditory response at around 100 ms post
# stimulus and extend the period to account for a low number of trials (72) and
# low sampling rate of 150 Hz.
data_cov = mne.compute_covariance(epochs, tmin=0.01, tmax=0.25,
method='empirical')
noise_cov = mne.compute_covariance(epochs, tmin=tmin, tmax=0,
method='empirical')
data_cov.plot(epochs.info)
del epochs
###############################################################################
# When looking at the covariance matrix plots, we can see that our data is
# slightly rank-deficient as the rank is not equal to the number of channels.
# Thus, we will have to regularize the covariance matrix before inverting it
# in the beamformer calculation. This can be achieved by setting the parameter
# ``reg=0.05`` when calculating the spatial filter with
# :func:`~mne.beamformer.make_lcmv`. This corresponds to loading the diagonal
# of the covariance matrix with 5% of the sensor power.
###############################################################################
# The forward model
# -----------------
# The forward model is the other important ingredient for the computation of a
# spatial filter. Here, we will load the forward model from disk; more
# information on how to create a forward model can be found in this tutorial:
# :ref:`tut-forward`.
# Note that beamformers are usually computed in a :class:`volume source space
# <mne.VolSourceEstimate>`, because estimating only cortical surface
# activation can misrepresent the data.
# Read forward model
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-vol-7-fwd.fif'
forward = mne.read_forward_solution(fwd_fname)
###############################################################################
# Handling depth bias
# -------------------
#
# The forward model solution is inherently biased toward superficial sources.
# When analyzing single conditions it is best to mitigate the depth bias
# somehow. There are several ways to do this:
#
# - :func:`mne.beamformer.make_lcmv` has a ``depth`` parameter that normalizes
# the forward model prior to computing the spatial filters. See the docstring
# for details.
# - Unit-noise gain beamformers handle depth bias by normalizing the
# weights of the spatial filter. Choose this by setting
# ``weight_norm='unit-noise-gain'``.
# - When computing the Neural activity index, the depth bias is handled by
# normalizing both the weights and the estimated noise (see
# :footcite:`VanVeenEtAl1997`). Choose this by setting ``weight_norm='nai'``.
#
# Note that when comparing conditions, the depth bias will cancel out and it is
# possible to set both parameters to ``None``.
#
#
# Compute the spatial filter
# --------------------------
# Now we can compute the spatial filter. We'll use a unit-noise gain beamformer
# to deal with depth bias, and will also optimize the orientation of the
# sources such that output power is maximized.
# This is achieved by setting ``pick_ori='max-power'``.
# This gives us one source estimate per source (i.e., voxel), which is known
# as a scalar beamformer.
filters = make_lcmv(evoked.info, forward, data_cov, reg=0.05,
noise_cov=noise_cov, pick_ori='max-power',
weight_norm='unit-noise-gain', rank=None)
# You can save the filter for later use with:
# filters.save('filters-lcmv.h5')
###############################################################################
# It is also possible to compute a vector beamformer, which gives back three
# estimates per voxel, corresponding to the three direction components of the
# source. This can be achieved by setting
# ``pick_ori='vector'`` and will yield a :class:`volume vector source estimate
# <mne.VolVectorSourceEstimate>`. So we will compute another set of filters
# using the vector beamformer approach:
filters_vec = make_lcmv(evoked.info, forward, data_cov, reg=0.05,
noise_cov=noise_cov, pick_ori='vector',
weight_norm='unit-noise-gain', rank=None)
# save a bit of memory
src = forward['src']
del forward
###############################################################################
# Apply the spatial filter
# ------------------------
# The spatial filter can be applied to different data types: raw, epochs,
# evoked data or the data covariance matrix to gain a static image of power.
# The function to apply the spatial filter to :class:`~mne.Evoked` data is
# :func:`~mne.beamformer.apply_lcmv` which is
# what we will use here. The other functions are
# :func:`~mne.beamformer.apply_lcmv_raw`,
# :func:`~mne.beamformer.apply_lcmv_epochs`, and
# :func:`~mne.beamformer.apply_lcmv_cov`.
stc = apply_lcmv(evoked, filters, max_ori_out='signed')
stc_vec = apply_lcmv(evoked, filters_vec, max_ori_out='signed')
del filters, filters_vec
###############################################################################
# Visualize the reconstructed source activity
# -------------------------------------------
# We can visualize the source estimate in different ways, e.g. as a volume
# rendering, an overlay onto the MRI, or as an overlay onto a glass brain.
#
# The plots for the scalar beamformer show brain activity in the right temporal
# lobe around 100 ms post stimulus. This is expected given the left-ear
# auditory stimulation of the experiment.
lims = [0.3, 0.45, 0.6]
kwargs = dict(src=src, subject='sample', subjects_dir=subjects_dir,
initial_time=0.087, verbose=True)
###############################################################################
# On MRI slices (orthoview; 2D)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
stc.plot(mode='stat_map', clim=dict(kind='value', pos_lims=lims), **kwargs)
###############################################################################
# On MNI glass brain (orthoview; 2D)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
stc.plot(mode='glass_brain', clim=dict(kind='value', lims=lims), **kwargs)
###############################################################################
# Volumetric rendering (3D) with vectors
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# These plots can also be shown using a volumetric rendering via
# :meth:`~mne.VolVectorSourceEstimate.plot_3d`. Let's try visualizing the
# vector beamformer case. Here we get three source time courses out per voxel
# (one for each component of the dipole moment: x, y, and z), which appear
# as small vectors in the visualization (in the 2D plotters, only the
# magnitude can be shown):
# sphinx_gallery_thumbnail_number = 7
brain = stc_vec.plot_3d(
clim=dict(kind='value', lims=lims), hemi='both',
views=['coronal', 'sagittal', 'axial'], size=(800, 300),
view_layout='horizontal', show_traces=0.3,
brain_kwargs=dict(silhouette=True), **kwargs)
###############################################################################
# Visualize the activity of the maximum voxel with all three components
# ---------------------------------------------------------------------
# We can also visualize all three components in the peak voxel. For this, we
# will first find the peak voxel and then plot the time courses of this voxel.
peak_vox, _ = stc_vec.get_peak(tmin=0.08, tmax=0.1, vert_as_index=True)
ori_labels = ['x', 'y', 'z']
fig, ax = plt.subplots(1)
for ori, label in zip(stc_vec.data[peak_vox, :, :], ori_labels):
ax.plot(stc_vec.times, ori, label='%s component' % label)
ax.legend(loc='lower right')
ax.set(title='Activity per orientation in the peak voxel', xlabel='Time (s)',
ylabel='Amplitude (a. u.)')
mne.viz.utils.plt_show()
del stc_vec
###############################################################################
# Morph the output to fsaverage
# -----------------------------
#
# We can also use volumetric morphing to get the data to fsaverage space. This
# is for example necessary when comparing activity across subjects. Here, we
# will use the scalar beamformer example.
# We create a :class:`mne.SourceMorph` using :func:`mne.compute_source_morph`,
# apply it to the ``stc``, and pass the fsaverage source space as the ``src``
# argument to `mne.VolSourceEstimate.plot`. To save some computational load
# when computing the morph, only a few SDR and affine iterations are used:
fetch_fsaverage(subjects_dir) # ensure fsaverage src exists
fname_fs_src = subjects_dir + '/fsaverage/bem/fsaverage-vol-5-src.fif'
src_fs = mne.read_source_spaces(fname_fs_src)
morph = mne.compute_source_morph(
src, subject_from='sample', src_to=src_fs, subjects_dir=subjects_dir,
niter_sdr=[10, 10, 5], niter_affine=[10, 10, 5], # just for speed
verbose=True)
stc_fs = morph.apply(stc)
del stc
stc_fs.plot(
src=src_fs, mode='stat_map', initial_time=0.085, subjects_dir=subjects_dir,
clim=dict(kind='value', pos_lims=lims), verbose=True)
###############################################################################
# References
# ----------
#
# .. footbibliography::
#
#
# .. LINKS
#
# .. _`inverted`: https://en.wikipedia.org/wiki/Invertible_matrix
| bsd-3-clause |
RyanChinSang/LeagueLatency | BETA/Test Code/Hoverover/Hoverover4.py | 1 | 2967 | import math
import matplotlib.pyplot as plt
class AnnoteFinder(object):
"""callback for matplotlib to display an annotation when points are
clicked on. The point which is closest to the click and within
xtol and ytol is identified.
Register this function like this:
scatter(xdata, ydata)
af = AnnoteFinder(xdata, ydata, annotes)
connect('button_press_event', af)
"""
def __init__(self, xdata, ydata, annotes, ax=None, xtol=None, ytol=None):
self.data = list(zip(xdata, ydata, annotes))
if xtol is None:
xtol = ((max(xdata) - min(xdata))/float(len(xdata)))/2
if ytol is None:
ytol = ((max(ydata) - min(ydata))/float(len(ydata)))/2
self.xtol = xtol
self.ytol = ytol
if ax is None:
self.ax = plt.gca()
else:
self.ax = ax
self.drawnAnnotations = {}
self.links = []
def distance(self, x1, x2, y1, y2):
"""
return the distance between two points
"""
return(math.sqrt((x1 - x2)**2 + (y1 - y2)**2))
def __call__(self, event):
if event.inaxes:
clickX = event.xdata
clickY = event.ydata
if (self.ax is None) or (self.ax is event.inaxes):
annotes = []
# print(event.xdata, event.ydata)
for x, y, a in self.data:
# print(x, y, a)
if ((clickX-self.xtol < x < clickX+self.xtol) and
(clickY-self.ytol < y < clickY+self.ytol)):
annotes.append(
(self.distance(x, clickX, y, clickY), x, y, a))
if annotes:
annotes.sort()
distance, x, y, annote = annotes[0]
self.drawAnnote(event.inaxes, x, y, annote)
for l in self.links:
l.drawSpecificAnnote(annote)
def drawAnnote(self, ax, x, y, annote):
"""
Draw the annotation on the plot
"""
if (x, y) in self.drawnAnnotations:
markers = self.drawnAnnotations[(x, y)]
for m in markers:
m.set_visible(not m.get_visible())
self.ax.figure.canvas.draw_idle()
else:
t = ax.text(x, y, " - %s" % (annote),)
m = ax.scatter([x], [y], marker='d', c='r', zorder=100)
self.drawnAnnotations[(x, y)] = (t, m)
self.ax.figure.canvas.draw_idle()
def drawSpecificAnnote(self, annote):
annotesToDraw = [(x, y, a) for x, y, a in self.data if a == annote]
for x, y, a in annotesToDraw:
self.drawAnnote(self.ax, x, y, a)
x = range(10)
y = range(10)
annotes = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
fig, ax = plt.subplots()
ax.scatter(x,y)
af = AnnoteFinder(x,y, annotes, ax=ax)
fig.canvas.mpl_connect('button_press_event', af)
plt.show() | gpl-3.0 |
lewisodriscoll/sasview | src/sas/sasgui/guiframe/local_perspectives/plotting/AnnulusSlicer.py | 1 | 18960 | # TODO: the line slicer should listen to all 2DREFRESH events, get the data and slice it
# before pushing a new 1D data update.
#
# TODO: NEED MAJOR REFACTOR
#
import math
import wx
# from copy import deepcopy
# Debug printout
from sas.sasgui.guiframe.events import NewPlotEvent
from sas.sasgui.guiframe.events import StatusEvent
from sas.sasgui.guiframe.events import SlicerParameterEvent
from sas.sasgui.guiframe.events import EVT_SLICER_PARS
from BaseInteractor import _BaseInteractor
from sas.sasgui.guiframe.dataFitting import Data1D
class AnnulusInteractor(_BaseInteractor):
"""
Select an annulus through a 2D plot.
    This interactor is used to average 2D data over the annular region
    defined by 2 radii.
    This class is built from 2 RingInteractors.
"""
def __init__(self, base, axes, color='black', zorder=3):
_BaseInteractor.__init__(self, base, axes, color=color)
self.markers = []
self.axes = axes
self.base = base
self.qmax = min(math.fabs(self.base.data2D.xmax),
math.fabs(self.base.data2D.xmin)) # must be positive
self.connect = self.base.connect
# # Number of points on the plot
self.nbins = 36
# Cursor position of Rings (Left(-1) or Right(1))
self.xmaxd = self.base.data2D.xmax
self.xmind = self.base.data2D.xmin
if (self.xmaxd + self.xmind) > 0:
self.sign = 1
else:
self.sign = -1
# Inner circle
self.inner_circle = RingInteractor(self, self.base.subplot,
zorder=zorder,
r=self.qmax / 2.0, sign=self.sign)
self.inner_circle.qmax = self.qmax
self.outer_circle = RingInteractor(self, self.base.subplot,
zorder=zorder + 1, r=self.qmax / 1.8,
sign=self.sign)
self.outer_circle.qmax = self.qmax * 1.2
self.update()
self._post_data()
# Bind to slice parameter events
self.base.Bind(EVT_SLICER_PARS, self._onEVT_SLICER_PARS)
def _onEVT_SLICER_PARS(self, event):
"""
receive an event containing parameters values to reset the slicer
:param event: event of type SlicerParameterEvent with params as
attribute
"""
wx.PostEvent(self.base,
StatusEvent(status="AnnulusSlicer._onEVT_SLICER_PARS"))
event.Skip()
if event.type == self.__class__.__name__:
self.set_params(event.params)
self.base.update()
def set_layer(self, n):
"""
Allow adding plot to the same panel
:param n: the number of layer
"""
self.layernum = n
self.update()
def clear(self):
"""
Clear the slicer and all connected events related to this slicer
"""
self.clear_markers()
self.outer_circle.clear()
self.inner_circle.clear()
self.base.connect.clearall()
self.base.Unbind(EVT_SLICER_PARS)
def update(self):
"""
Respond to changes in the model by recalculating the profiles and
resetting the widgets.
"""
# Update locations
self.inner_circle.update()
self.outer_circle.update()
def save(self, ev):
"""
Remember the roughness for this layer and the next so that we
can restore on Esc.
"""
self.base.freeze_axes()
self.inner_circle.save(ev)
self.outer_circle.save(ev)
def _post_data(self, nbins=None):
"""
Uses annulus parameters to plot averaged data into 1D data.
:param nbins: the number of points to plot
"""
# Data to average
data = self.base.data2D
# If we have no data, just return
if data is None:
return
from sas.sascalc.dataloader.manipulations import Ring
rmin = min(math.fabs(self.inner_circle.get_radius()),
math.fabs(self.outer_circle.get_radius()))
rmax = max(math.fabs(self.inner_circle.get_radius()),
math.fabs(self.outer_circle.get_radius()))
# if the user does not specify the numbers of points to plot
# the default number will be nbins= 36
if nbins is None:
self.nbins = 36
else:
self.nbins = nbins
# # create the data1D Q average of data2D
sect = Ring(r_min=rmin, r_max=rmax, nbins=self.nbins)
sector = sect(self.base.data2D)
if hasattr(sector, "dxl"):
dxl = sector.dxl
else:
dxl = None
if hasattr(sector, "dxw"):
dxw = sector.dxw
else:
dxw = None
new_plot = Data1D(x=(sector.x - math.pi) * 180 / math.pi,
y=sector.y, dy=sector.dy)
new_plot.dxl = dxl
new_plot.dxw = dxw
new_plot.name = "AnnulusPhi" + "(" + self.base.data2D.name + ")"
new_plot.source = self.base.data2D.source
# new_plot.info=self.base.data2D.info
new_plot.interactive = True
new_plot.detector = self.base.data2D.detector
# If the data file does not tell us what the axes are, just assume...
new_plot.xaxis("\\rm{\phi}", 'degrees')
new_plot.yaxis("\\rm{Intensity} ", "cm^{-1}")
if hasattr(data, "scale") and data.scale == 'linear' and \
self.base.data2D.name.count("Residuals") > 0:
new_plot.ytransform = 'y'
new_plot.yaxis("\\rm{Residuals} ", "/")
new_plot.group_id = "AnnulusPhi" + self.base.data2D.name
new_plot.id = "AnnulusPhi" + self.base.data2D.name
new_plot.is_data = True
new_plot.xtransform = "x"
new_plot.ytransform = "y"
self.base.parent.update_theory(data_id=data.id, theory=new_plot)
wx.PostEvent(self.base.parent, NewPlotEvent(plot=new_plot, title="AnnulusPhi"))
def moveend(self, ev):
"""
Called when any dragging motion ends.
        Post an event (type=SlicerParameterEvent) to the 2D plotter
        with a copy of the slicer parameters.
"""
self.base.thaw_axes()
# Post parameters to plotter 2D
event = SlicerParameterEvent()
event.type = self.__class__.__name__
event.params = self.get_params()
wx.PostEvent(self.base, event)
def restore(self):
"""
Restore the roughness for this layer.
"""
self.inner_circle.restore()
self.outer_circle.restore()
def move(self, x, y, ev):
"""
Process move to a new position, making sure that the move is allowed.
"""
pass
def set_cursor(self, x, y):
pass
def get_params(self):
"""
Store a copy of values of parameters of the slicer into a dictionary.
:return params: the dictionary created
"""
params = {}
params["inner_radius"] = math.fabs(self.inner_circle._inner_mouse_x)
params["outer_radius"] = math.fabs(self.outer_circle._inner_mouse_x)
params["nbins"] = self.nbins
return params
def set_params(self, params):
"""
Receive a dictionary and reset the slicer with values contained
in the values of the dictionary.
:param params: a dictionary containing name of slicer parameters and
values the user assigned to the slicer.
"""
inner = math.fabs(params["inner_radius"])
outer = math.fabs(params["outer_radius"])
self.nbins = int(params["nbins"])
# # Update the picture
self.inner_circle.set_cursor(inner, self.inner_circle._inner_mouse_y)
self.outer_circle.set_cursor(outer, self.outer_circle._inner_mouse_y)
# # Post the data given the nbins entered by the user
self._post_data(self.nbins)
def freeze_axes(self):
"""
"""
self.base.freeze_axes()
def thaw_axes(self):
"""
"""
self.base.thaw_axes()
def draw(self):
"""
"""
self.base.draw()
class RingInteractor(_BaseInteractor):
"""
Draw a ring Given a radius
"""
def __init__(self, base, axes, color='black', zorder=5, r=1.0, sign=1):
"""
        :param color: the color of the line that defines the ring
        :param r: the radius of the ring
        :param sign: the direction of motion of the marker
"""
_BaseInteractor.__init__(self, base, axes, color=color)
self.markers = []
self.axes = axes
# Current radius of the ring
self._inner_mouse_x = r
# Value of the center of the ring
self._inner_mouse_y = 0
# previous value of that radius
self._inner_save_x = r
# Save value of the center of the ring
self._inner_save_y = 0
        # Reference to the object that instantiated this RingInteractor
self.base = base
# the direction of the motion of the marker
self.sign = sign
# # Create a marker
try:
# Inner circle marker
x_value = [self.sign * math.fabs(self._inner_mouse_x)]
self.inner_marker = self.axes.plot(x_value, [0], linestyle='',
marker='s', markersize=10,
color=self.color, alpha=0.6,
pickradius=5, label="pick",
zorder=zorder,
visible=True)[0]
except:
x_value = [self.sign * math.fabs(self._inner_mouse_x)]
self.inner_marker = self.axes.plot(x_value, [0], linestyle='',
marker='s', markersize=10,
color=self.color, alpha=0.6,
label="pick",
visible=True)[0]
message = "\nTHIS PROTOTYPE NEEDS THE LATEST"
message += " VERSION OF MATPLOTLIB\n"
message += "Get the SVN version that is at "
message += " least as recent as June 1, 2007"
owner = self.base.base.parent
wx.PostEvent(owner, StatusEvent(status="AnnulusSlicer: %s" % message))
# Draw a circle
[self.inner_circle] = self.axes.plot([], [], linestyle='-', marker='', color=self.color)
# the number of points that make the ring line
self.npts = 40
self.connect_markers([self.inner_marker])
self.update()
def set_layer(self, n):
"""
Allow adding plot to the same panel
:param n: the number of layer
"""
self.layernum = n
self.update()
def clear(self):
"""
Clear the slicer and all connected events related to this slicer
"""
self.clear_markers()
try:
self.inner_marker.remove()
self.inner_circle.remove()
except:
# Old version of matplotlib
for item in range(len(self.axes.lines)):
del self.axes.lines[0]
def get_radius(self):
"""
:return self._inner_mouse_x: the current radius of the ring
"""
return self._inner_mouse_x
def update(self):
"""
Draw the new roughness on the graph.
"""
# Plot inner circle
x = []
y = []
for i in range(self.npts):
phi = 2.0 * math.pi / (self.npts - 1) * i
xval = 1.0 * self._inner_mouse_x * math.cos(phi)
yval = 1.0 * self._inner_mouse_x * math.sin(phi)
x.append(xval)
y.append(yval)
self.inner_marker.set(xdata=[self.sign * math.fabs(self._inner_mouse_x)],
ydata=[0])
self.inner_circle.set_data(x, y)
def save(self, ev):
"""
Remember the roughness for this layer and the next so that we
can restore on Esc.
"""
self._inner_save_x = self._inner_mouse_x
self._inner_save_y = self._inner_mouse_y
self.base.freeze_axes()
def moveend(self, ev):
"""
Called after a dragging motion
"""
self.base.moveend(ev)
def restore(self):
"""
Restore the roughness for this layer.
"""
self._inner_mouse_x = self._inner_save_x
self._inner_mouse_y = self._inner_save_y
def move(self, x, y, ev):
"""
Process move to a new position, making sure that the move is allowed.
"""
self._inner_mouse_x = x
self._inner_mouse_y = y
self.base.base.update()
def set_cursor(self, x, y):
"""
draw the ring given x, y value
"""
self.move(x, y, None)
self.update()
def get_params(self):
"""
Store a copy of values of parameters of the slicer into a dictionary.
:return params: the dictionary created
"""
params = {}
params["radius"] = math.fabs(self._inner_mouse_x)
return params
def set_params(self, params):
"""
Receive a dictionary and reset the slicer with values contained
in the values of the dictionary.
:param params: a dictionary containing name of slicer parameters and
values the user assigned to the slicer.
"""
x = params["radius"]
self.set_cursor(x, self._inner_mouse_y)
class CircularMask(_BaseInteractor):
"""
Draw a ring Given a radius
"""
def __init__(self, base, axes, color='grey', zorder=3, side=None):
"""
        :param color: the color of the line that defines the circle
        :param side: if truthy, the mask computed in _post_data is inverted
"""
_BaseInteractor.__init__(self, base, axes, color=color)
self.markers = []
self.axes = axes
self.base = base
self.is_inside = side
self.qmax = min(math.fabs(self.base.data.xmax),
math.fabs(self.base.data.xmin)) # must be positive
self.connect = self.base.connect
# Cursor position of Rings (Left(-1) or Right(1))
self.xmaxd = self.base.data.xmax
self.xmind = self.base.data.xmin
if (self.xmaxd + self.xmind) > 0:
self.sign = 1
else:
self.sign = -1
        # Outer circle
self.outer_circle = RingInteractor(self, self.base.subplot, 'blue',
zorder=zorder + 1, r=self.qmax / 1.8,
sign=self.sign)
self.outer_circle.qmax = self.qmax * 1.2
self.update()
self._post_data()
# Bind to slice parameter events
# self.base.Bind(EVT_SLICER_PARS, self._onEVT_SLICER_PARS)
def _onEVT_SLICER_PARS(self, event):
"""
receive an event containing parameters values to reset the slicer
:param event: event of type SlicerParameterEvent with params as
attribute
"""
wx.PostEvent(self.base,
StatusEvent(status="AnnulusSlicer._onEVT_SLICER_PARS"))
event.Skip()
if event.type == self.__class__.__name__:
self.set_params(event.params)
self.base.update()
def set_layer(self, n):
"""
Allow adding plot to the same panel
:param n: the number of layer
"""
self.layernum = n
self.update()
def clear(self):
"""
Clear the slicer and all connected events related to this slicer
"""
self.clear_markers()
self.outer_circle.clear()
self.base.connect.clearall()
# self.base.Unbind(EVT_SLICER_PARS)
def update(self):
"""
Respond to changes in the model by recalculating the profiles and
resetting the widgets.
"""
# Update locations
self.outer_circle.update()
# if self.is_inside is not None:
out = self._post_data()
return out
def save(self, ev):
"""
Remember the roughness for this layer and the next so that we
can restore on Esc.
"""
self.base.freeze_axes()
self.outer_circle.save(ev)
def _post_data(self):
"""
        Uses the outer circle radius to compute a circular mask for the 2D data.
        :return out: the boolean mask array (inverted when ``is_inside`` is set)
"""
# Data to average
data = self.base.data
# If we have no data, just return
if data is None:
return
mask = data.mask
from sas.sascalc.dataloader.manipulations import Ringcut
rmin = 0
rmax = math.fabs(self.outer_circle.get_radius())
# # create the data1D Q average of data2D
mask = Ringcut(r_min=rmin, r_max=rmax)
if self.is_inside:
out = (mask(data) == False)
else:
out = (mask(data))
# self.base.data.mask=out
return out
def moveend(self, ev):
"""
Called when any dragging motion ends.
        Recompute the circular mask by calling the _post_data method.
"""
self.base.thaw_axes()
# create a 1D data plot
self._post_data()
def restore(self):
"""
Restore the roughness for this layer.
"""
self.outer_circle.restore()
def move(self, x, y, ev):
"""
Process move to a new position, making sure that the move is allowed.
"""
pass
def set_cursor(self, x, y):
pass
def get_params(self):
"""
Store a copy of values of parameters of the slicer into a dictionary.
:return params: the dictionary created
"""
params = {}
params["outer_radius"] = math.fabs(self.outer_circle._inner_mouse_x)
return params
def set_params(self, params):
"""
Receive a dictionary and reset the slicer with values contained
in the values of the dictionary.
:param params: a dictionary containing name of slicer parameters and
values the user assigned to the slicer.
"""
outer = math.fabs(params["outer_radius"])
# # Update the picture
self.outer_circle.set_cursor(outer, self.outer_circle._inner_mouse_y)
# # Post the data given the nbins entered by the user
self._post_data()
def freeze_axes(self):
self.base.freeze_axes()
def thaw_axes(self):
self.base.thaw_axes()
def draw(self):
self.base.update()
| bsd-3-clause |
yanlend/scikit-learn | sklearn/utils/random.py | 234 | 10510 | # Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given, the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
    if pop_size == 0:
        raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
            # Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
| bsd-3-clause |
hrjn/scikit-learn | examples/calibration/plot_calibration_curve.py | 113 | 5904 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: the calibration curve has a sigmoid shape, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.model_selection import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
AlexRobson/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 254 | 2639 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3  # the smaller it is, the longer the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positve elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
fengzhyuan/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max] x [y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
vincent-legoll/glancing | test/src/bench_mh.py | 1 | 4044 | #! /usr/bin/env python
import os
import sys
import pickle
import timeit
import argparse
import matplotlib.pyplot as plt
import matplotlib.markers
from tutils import local_pythonpath
# Setup project-local PYTHONPATH
local_pythonpath('..', '..', 'src')
import utils
_SETUP = \
'''
import multihash
mh = multihash.multihash_%s(%s)
'''
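# For example, _SETUP % ('hashlib', 'block_size=4096') renders the timeit
# setup code as:
#
# import multihash
# mh = multihash.multihash_hashlib(block_size=4096)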
def bench_one(fname, setup, repeats=1):
return timeit.timeit('mh.hash_file("%s")' % fname, setup=setup,
number=repeats)
def bench_files(files):
lengths = []
times_sh = []
times_mh = {
# Uninteresting buffer sizes: too small
32: [],
64: [],
128: [],
512: [],
1024: [],
1024 * 4: [],
1024 * 1024: [],
10 * 1024 * 1024: [],
}
for fname in files:
lengths.append(os.path.getsize(fname) / (1024 * 1024))
times_sh.append(bench_one(fname, _SETUP % ('serial_exec', '')))
for size, res in times_mh.iteritems():
res.append(bench_one(fname, _SETUP %
('hashlib', 'block_size=%d' % size)))
return lengths, times_sh, times_mh
def plotit(lengths, times_sh, times_mh, image_file, display):
plt.title('Benchmark multiple hash computing implementations')
plt.xlabel('File size in MB')
plt.ylabel('Time in seconds')
plt.rc('lines', linewidth=1, linestyle='-')
# Prepare marker list
mks = set(matplotlib.markers.MarkerStyle().markers.keys())
# Remove unwanted ones
mks -= set((None, 'None', '', ' ', '|', '_', '.', ',', '+', '-', 'd', 'x', '*'))
mks = ['<', '>', '^', 'v', 'o', 'D', 's', 'p', 'h', ]
assert len(mks) >= len(times_mh)
# Experiment with logarithmic scale
plotter = plt.semilogx # plt.plot
# Plot serial exec data
plotter(lengths, times_sh, label='serial exec', marker='*')
# Plot parallel hashlib data, for each block size
for size in sorted(times_mh.keys()):
plotter(lengths, times_mh[size], marker=mks.pop(),
label='parallel hashlib, bs=%s' % (utils.size_t(size),))
# Give some horizontal room
#xmin, xmax = plt.xlim()
#plt.xlim(0, xmax * 1.1)
# Put legend in top left corner
plt.legend(loc=2)
# Save an image of the plotted data
if image_file:
plt.savefig(image_file)
# Display resulting figure
if display:
plt.show()
def parse_args():
parser = argparse.ArgumentParser(description='Benchmark multihash')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('-l', '--load', dest='load', metavar='FILE',
help='load data from specified file')
group.add_argument('-s', '--store', dest='store', metavar='FILE',
help='store data into specified file')
parser.add_argument(dest='files', metavar='DATAFILE', nargs='*',
help='files to compute checksums of')
parser.add_argument('-d', '--display', dest='display', action='store_true',
help='display plot data')
parser.add_argument('-p', '--plot', dest='plot', metavar='FILE',
help='save plot data to .svg image file')
return parser
def main():
parser = parse_args()
args = parser.parse_args()
if args.load:
if args.files:
print "\nIgnoring DATAFILE(s) parameters, loading data from:", args.load
with open(args.load, 'rb') as fin:
lengths, times_sh, times_mh = pickle.load(fin)
else:
if args.files:
lengths, times_sh, times_mh = bench_files(args.files)
if args.store:
with open(args.store, 'wb+') as fout:
pickle.dump((lengths, times_sh, times_mh), fout)
else:
parser.print_help()
print "\nMissing DATAFILE(s) parameters"
sys.exit(1)
if args.display or args.plot:
plotit(lengths, times_sh, times_mh, args.plot, args.display)
if __name__ == '__main__':
main()
| gpl-3.0 |
akaihola/lusmu | lusmu/tests/test_vector.py | 1 | 12243 | """Test suite for lusmu.vector
Copyright 2013 Eniram Ltd. See the LICENSE file at the top-level directory of
this distribution and at https://github.com/akaihola/lusmu/blob/master/LICENSE
"""
import tempfile
from unittest import TestCase
from mock import patch
import joblib
from nose.tools import assert_raises, eq_
import numpy as np
import pandas as pd
from lusmu.core import DIRTY
from lusmu.tests.test_core import (NoOutputTypeAction,
NoneOutputTypeAction,
IntOutputTypeAction)
from lusmu.vector import Input
from lusmu import vector
from lusmu.tests.tools import parameterize
_builtin_sum = sum
def sum(*args):
    # Shadow the builtin name but delegate to it to avoid infinite recursion.
    return _builtin_sum(args)
class VectorEq(vector.VectorEquality):
"""Mock node class implementing the vector equality test"""
def __init__(self, value):
self._value = value
def test_scalar_equality():
"""Test cases for lusmu.vector.VectorEq._value_eq() with Python scalars"""
@parameterize
def check(value, other_value, expected):
"""Scalar node value {0} == {1}: {2}"""
# pylint: disable=W0212
# Access to a protected member of a client class
vector = VectorEq(value)
assert expected == vector._value_eq(other_value)
yield check(DIRTY, DIRTY, True)
yield check(DIRTY, 0, False)
yield check(0, 0, True)
yield check(0, 1, False)
yield check(0, 0.0, False)
yield check(0, 1.0, False)
yield check(0.0, 0.0, True)
yield check(0.0, 1.0, False)
yield check('a', 'a', True)
yield check('a', 'b', False)
def test_numpy_vector_equality():
"""Test cases for lusmu.vector.VectorEq._value_eq() with numpy arrays"""
@parameterize
def check(value, other_value, expected):
"""Vector node value {0} == {1}: {2}"""
# pylint: disable=W0212
# Access to a protected member of a client class
vector = VectorEq(np.array(value))
assert expected == vector._value_eq(np.array(other_value))
yield check([], [], True)
yield check([1], [], False)
yield check([], [2], False)
yield check([3], [3], True)
yield check([4], [4, 5], False)
yield check([4], [4, 4], False)
yield check([4, 5], [4], False)
yield check([6, 7, 8], [6, 7, 8], True)
yield check([9, np.nan], [9, np.nan], True)
yield check([9, 10], [9, np.nan], False)
def test_numpy_vector_equality_others():
"""Test cases for lusmu.vector.VectorEq._value_eq() with complex data types
"""
@parameterize
def check(value, other_value, expected):
"""Vector node value {0} == {1}: {2}"""
# pylint: disable=W0212
# Access to a protected member of a client class
vector = VectorEq(value)
assert expected == vector._value_eq(other_value)
yield check(DIRTY, np.array([[1,2],[3,4]]), False)
yield check(np.array([[1,2],[3,4]]), np.array([[1,2],[3,4]]), True)
yield check(np.array([[1,2],[3,4]]), np.array([[1,2],[3,5]]), False)
yield check(np.array([[1,2],[3,4]]), [[1,2],[3,4]], False)
yield check(np.array([[1,2]]), np.array([[1,2],[1,2]]), False)
def test_pandas_vector_equality():
"""Test cases for lusmu.vector.VectorEq._value_eq() with pandas Series"""
@parameterize
def check(value, index, other_value, other_index, expected):
"""Series node value {0}/{1} == {2}/{3}: {4}"""
# pylint: disable=W0212
# Access to a protected member of a client class
this = pd.Series(value, index=pd.to_datetime(index))
other = pd.Series(other_value, index=pd.to_datetime(other_index))
vector = VectorEq(this)
assert expected == vector._value_eq(other)
yield check([], [], [], [], True)
yield check([1], ['2013-10-15'], [], [], False)
yield check([], [], [2], ['2013-10-15'], False)
yield check([3], ['2013-10-15'], [3], ['2013-10-15'], True)
yield check([4], ['2013-10-15'], [4, 5],
['2013-10-15', '2013-10-16'],
False)
yield check([4, 5], ['2013-10-15', '2013-10-16'],
[4], ['2013-10-15'],
False)
yield check([6, 7, 8], ['2013-10-15', '2013-10-16', '2013-10-17'],
[6, 7, 8], ['2013-10-15', '2013-10-16', '2013-10-17'],
True)
yield check([6, 7, 8], ['2013-10-15', '2013-10-16', '2013-10-17'],
[6, 7, 8], ['2013-10-15', '2013-10-16', '2013-10-18'],
False)
yield check([9, np.nan], ['2013-10-15', '2013-10-16'],
[9, np.nan], ['2013-10-15', '2013-10-16'],
True)
yield check([9, np.nan], ['2013-10-15', '2013-10-16'],
[9, np.nan], ['2013-10-15', '2013-10-17'],
False)
yield check([9, 10], ['2013-10-15', '2013-10-16'],
[9, np.nan], ['2013-10-15', '2013-10-16'],
False)
def test_mixed_vector_equality():
"""Test cases for lusmu.vector.VectorEq._value_eq() with pandas Series"""
@parameterize
def check(value, index, other_value, expected):
"""Series node value {0}/{1} == {2}: {3}"""
# pylint: disable=W0212
# Access to a protected member of a client class
this = pd.Series(value, index=pd.to_datetime(index))
other = np.array(other_value)
vector = VectorEq(this)
assert expected == vector._value_eq(other)
yield check([], [], [], False)
yield check([1], ['2013-10-15'], [], False)
yield check([], [], [2], False)
yield check([3], ['2013-10-15'], [3], False)
yield check([4], ['2013-10-15'], [4, 5], False)
yield check([4, 5], ['2013-10-15', '2013-10-16'], [4], False)
class InputSetValueTestCase(TestCase):
def test_no_value(self):
inp = vector.Input()
eq_(None, inp.last_timestamp)
def test_dirty_value(self):
inp = vector.Input(value=DIRTY)
eq_(None, inp.last_timestamp)
def test_initial_value(self):
inp = vector.Input(value=pd.Series([1, 2], [1001, 1002]))
eq_(1002, inp.last_timestamp)
def test_set_value(self):
inp = vector.Input()
inp.value = pd.Series([1, 2], index=[1001, 1002])
eq_(1002, inp.last_timestamp)
def test_scalar_value(self):
inp = vector.Input(value=100000.0)
eq_(None, inp.last_timestamp)
def test_array_value(self):
inp = vector.Input(value=np.array([1, 2]))
eq_(None, inp.last_timestamp)
def _pickle_unpickle(data):
with tempfile.NamedTemporaryFile(delete=True) as pickle_file:
pickle_file.close()
joblib.dump(data, pickle_file.name)
return joblib.load(pickle_file.name)
def test_pickling():
@parameterize
def check(node_class, attribute, value, expected):
"""{0.__name__}.{1} pickling works as expected"""
node = node_class()
setattr(node, attribute, value)
unpickled_node = _pickle_unpickle(node)
if isinstance(expected, type) and issubclass(expected, Exception):
assert_raises(expected, getattr, unpickled_node, attribute)
else:
value = getattr(unpickled_node, attribute)
if callable(expected):
assert expected(value)
elif isinstance(expected, np.ndarray):
assert vector.vector_eq(expected, value)
else:
assert expected == value
# arguments: (node class, attribute, value to set,
# expected value/exception/test)
yield check(vector.Input, 'name', 'constant',
'constant')
yield check(vector.Input, '_value', 42.0,
42.0)
yield check(vector.Input, '_value', DIRTY,
DIRTY)
yield check(vector.Input, '_value', np.array([42.0]),
np.array([42.0]))
yield check(vector.Input, 'last_timestamp', 1234,
1234)
yield check(vector.Input, 'extra_attribute', 42.0,
AttributeError)
yield check(vector.Node, 'name', 'constant',
'constant')
yield check(vector.Node, '_value', 42.0,
42.0)
yield check(vector.Node, '_value', np.array([42.0]),
np.array([42.0]))
yield check(vector.Node, '_action', sum,
lambda _action: _action == sum)
yield check(vector.Node, 'triggered', True,
True)
yield check(vector.Node, '_positional_inputs',
(vector.Input(name='foo'),),
(lambda _positional_inputs:
[n.name for n in _positional_inputs] == ['foo']))
yield check(vector.Node, '_keyword_inputs',
{'foo': vector.Input(name='bar')},
(lambda _keyword_inputs:
{k: v.name for k, v in _keyword_inputs.items()}
== {'foo': 'bar'}))
yield check(vector.Node, 'extra_attribute', 42.0,
AttributeError)
def test_input_equality():
@parameterize
def check(_, a, b, expected):
"""Input.__eq__ is {3} for {0}"""
result = a == b
eq_(expected, result)
yield check('unnamed (auto-named) dirty value inputs',
Input(name=None, value=DIRTY), Input(name=None, value=DIRTY),
False)
yield check('non-matching names',
Input(name='a', value=DIRTY), Input(name='b', value=DIRTY),
False)
yield check('named vs. unnamed node',
Input(name='a', value=DIRTY), Input(name=None, value=DIRTY),
False)
class VectorNodeVerifyOutputTypeTestCase(TestCase):
def setUp(self):
self.input = Input()
def test_disabled_and_no_output_type(self):
node = vector.Node(action=NoOutputTypeAction(),
inputs=vector.Node.inputs(self.input))
self.input.value = np.array(['42'])
node._evaluate()
def test_disabled_and_none_output_type(self):
node = vector.Node(action=NoneOutputTypeAction(),
inputs=vector.Node.inputs(self.input))
self.input.value = np.array(['42'])
node._evaluate()
def test_disabled_and_correct_output_type(self):
node = vector.Node(action=IntOutputTypeAction(),
inputs=vector.Node.inputs(self.input))
self.input.value = np.array([42])
node._evaluate()
def test_disabled_and_wrong_output_type(self):
node = vector.Node(action=IntOutputTypeAction(),
inputs=vector.Node.inputs(self.input))
self.input.value = np.array(['42'])
node._evaluate()
def test_enabled_and_no_output_type(self):
with patch('lusmu.core.VERIFY_OUTPUT_TYPES', True):
node = vector.Node(action=NoOutputTypeAction(),
inputs=vector.Node.inputs(self.input))
self.input.value = np.array(['42'])
node._evaluate()
def test_enabled_and_none_output_type(self):
with patch('lusmu.core.VERIFY_OUTPUT_TYPES', True):
node = vector.Node(action=NoneOutputTypeAction(),
inputs=vector.Node.inputs(self.input))
self.input.value = np.array(['42'])
node._evaluate()
def test_enabled_and_correct_output_type(self):
with patch('lusmu.core.VERIFY_OUTPUT_TYPES', True):
node = vector.Node(action=IntOutputTypeAction(),
inputs=vector.Node.inputs(self.input))
self.input.value = np.array([42])
node._evaluate()
def test_enabled_and_wrong_output_type(self):
with patch('lusmu.core.VERIFY_OUTPUT_TYPES', True):
with assert_raises(TypeError) as exc:
node = vector.Node(name='node',
action=IntOutputTypeAction(),
inputs=vector.Node.inputs(self.input))
self.input.value = np.array(['42'])
node._evaluate()
self.assertEqual(
"The output value type 'string_' for [node]\n"
"doesn't match the expected type 'int' for action "
'"int_action".', str(exc.exception))
| bsd-3-clause |
jakobworldpeace/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
vybstat/scikit-learn | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
bugraoral/TextRank | main.py | 1 | 4731 | import nltk
import string
import itertools
import os
from pos_tagger import tag
import matplotlib.pyplot as plt
dataPath = 'news'
categoryList = os.listdir(dataPath)
def removePunc(text,replace):
punct = set(string.punctuation + '̇')
noPuncText = ''
for ch in text:
if ch in punct:
noPuncText += replace
else:
noPuncText += ch
return noPuncText
def extract_candidate_words(text, good_tags=set(['Adj', 'Noun', 'Noun_Nom', 'Verb'])):
stop_words = set(nltk.corpus.stopwords.words('turkish'))
tagged_words = itertools.chain.from_iterable((tag(removePunc(sent,' ').strip())
for sent in nltk.sent_tokenize(text)))
candidates = [word.lower() for word, tag in tagged_words
if tag in good_tags and word.lower() not in stop_words
and (len(word) > 2)]
return candidates
def score_keyphrases_by_textrank(text, n_keywords=0.05):
from itertools import takewhile, tee
import networkx, nltk
stop_words = set(nltk.corpus.stopwords.words('turkish'))
# tokenize for all words, and extract *candidate* words
words = [word.lower()
for sent in nltk.sent_tokenize(text)
for word in nltk.word_tokenize(removePunc(sent,' ').strip())
if len(word) > 2 and word.lower() not in stop_words]
candidates = extract_candidate_words(text)
# build graph, each node is a unique candidate
graph = networkx.Graph()
graph.add_nodes_from(set(candidates))
# iterate over word-pairs, add unweighted edges into graph
def pairwise(iterable):
"""s -> (s0,s1), (s1,s2), (s2, s3), ..."""
a, b = tee(iterable)
next(b, None)
return zip(a, b)
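    # Illustrative example (added): list(pairwise(['a', 'b', 'c'])) yields
    # [('a', 'b'), ('b', 'c')], i.e. each candidate word paired with its successor.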
for w1, w2 in pairwise(candidates):
if w2:
graph.add_edge(*sorted([w1, w2]))
# score nodes using default pagerank algorithm, sort by score, keep top n_keywords
ranks = networkx.pagerank(graph)
if 0 < n_keywords < 1:
n_keywords = int(round(len(candidates) * n_keywords))
word_ranks = {word_rank[0]: word_rank[1]
for word_rank in sorted(ranks.items(), key=lambda x: x[1], reverse=True)[:n_keywords]}
keywords = set(word_ranks.keys())
# merge keywords into keyphrases
keyphrases = {}
j = 0
for i, word in enumerate(words):
if i < j:
continue
if word in keywords:
kp_words = list(takewhile(lambda x: x in keywords, words[i:i+10]))
indexL = words[i:i+10].index(kp_words[0])
indexR = words[i:i+10][::-1].index(kp_words[-1])
test_words = words[indexL:indexR+1]
avg_pagerank = sum(word_ranks[w] for w in kp_words) / float(len(kp_words))
keyphrases[' '.join(kp_words)] = avg_pagerank
# counter as hackish way to ensure merged keyphrases are non-overlapping
j = i + len(test_words)
for item in candidates:
if item not in keywords and item in graph:
graph.remove_node(item)
plt.figure(num=None, figsize=(20, 20), dpi=80)
plt.axis('off')
fig = plt.figure(1)
sp = networkx.spring_layout(graph)
networkx.draw_networkx_nodes(graph,sp)
networkx.draw_networkx_edges(graph,sp)
networkx.draw_networkx_labels(graph,sp)
plt.show()
return sorted(keyphrases.items(), key=lambda x: x[1], reverse=True)
def readFile(filePath):
tmp = ''
with open(filePath) as f:
tmp = f.readlines()
text = ''.join(tmp[1:])
title = tmp[0].lower()
return text,title
totalNews = 0
totalCorrectNews = 0
for category in categoryList:
categoryNews = 0
correctNews = 0
if not category.startswith('.'):
location = dataPath + '/' + category
newsList = [name for name in os.listdir(location) if not name.startswith('.')]
count = len(newsList)
percent = 10
index = int((count*percent)/100)
for news in newsList[0:index]:
totalNews += 1
categoryNews += 1
text,title = readFile(location + '/' + news)
keywords = score_keyphrases_by_textrank(text)
for keyword,score in keywords:
if removePunc(keyword,'') in title:
correctNews += 1
totalCorrectNews += 1
break
print('Accuracy for %s is %f' % (category,float(correctNews)/categoryNews))
print('Total accuracy is %f' % (float(totalCorrectNews)/totalNews))
'''
with open(filePath) as f:
text = ''.join(f.readlines()[1:])
keywords = score_keyphrases_by_textrank(text)
for keyword,score in keywords:
print(removePunc(keyword,''),score)
''' | gpl-3.0 |
vermouthmjl/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 85 | 5728 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index (full scan). LSHForest index
have sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundreds of dimensions. Higher dimensional datasets tends to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
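# (Added note: with the defaults above this evaluates to roughly
# [1000, 2511, 6309, 15848, 39810, 100000] samples per step.)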
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
# Initialize LSHForest for queries of a single neighbor
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[[rng.randint(0, n_queries)]]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', prop=dict(size='small'))
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
desihub/fiberassign | py/fiberassign/vis.py | 1 | 23940 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
fiberassign.vis
=======================
Visualization tools.
"""
from __future__ import absolute_import, division, print_function
import os
import warnings
import numpy as np
import multiprocessing as mp
from functools import partial
import fitsio
from ._internal import Shape
from .utils import Logger, default_mp_proc
from .hardware import (load_hardware, FIBER_STATE_STUCK, FIBER_STATE_BROKEN,
radec2xy)
from .tiles import load_tiles
from .targets import (Targets, load_target_table,
TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE, create_tagalong)
from .assign import (read_assignment_fits_tile, result_tiles, result_path,
avail_table_to_dict, get_parked_thetaphi)
plt = None
def set_matplotlib_pdf_backend():
"""Set the matplotlib backend to PDF.
This is necessary to render high resolution figures.
"""
global plt
if plt is not None:
return
try:
import matplotlib
matplotlib.use("pdf")
import matplotlib.pyplot as plt
except ValueError:
warnings.warn(
"""Couldn't set the PDF matplotlib backend,
positioner plots may be low resolution.
Proceeding with the default matplotlib backend."""
)
import matplotlib.pyplot as plt
def plot_target_type_color(tgtype):
color = "fuchsia"
tp = int(tgtype)
if (tp & TARGET_TYPE_SAFE) != 0:
color = "black"
elif (tp & TARGET_TYPE_SKY) != 0:
color = "blue"
elif (tp & TARGET_TYPE_SUPPSKY) != 0:
color = "blue"
elif (tp & TARGET_TYPE_STANDARD) != 0:
color = "gold"
if (tp & TARGET_TYPE_SCIENCE) != 0:
color = "green"
elif (tp & TARGET_TYPE_SCIENCE) != 0:
color = "red"
return color
def plot_positioner(ax, patrol_rad, loc, center, shptheta, shpphi, color="k",
linewidth=0.2, fontpt=2.0):
"""Plot one fiber positioner.
"""
set_matplotlib_pdf_backend()
if patrol_rad > 0:
patrol = plt.Circle((center[0], center[1]), radius=patrol_rad, fc=color,
ec="none", alpha=0.1)
ax.add_artist(patrol)
# Plot the arm from the center to the body
thetacent = shptheta.axis
armwidth = 0.25
armlen = np.sqrt((thetacent[1] - center[1])**2
+ (thetacent[0] - center[0])**2)
armang = np.arctan2(thetacent[1] - center[1], thetacent[0] - center[0])
sinarm = np.sin(armang)
cosarm = np.cos(armang)
arm_xoff = center[0] + (0.5*armwidth) * sinarm
arm_yoff = center[1] - (0.5*armwidth) * cosarm
armang_deg = armang * 180.0 / np.pi
arm = plt.Rectangle((arm_xoff, arm_yoff), armlen, armwidth,
angle=armang_deg, color=color, linewidth=2*linewidth,
fill=False)
ax.add_artist(arm)
for piece in [shptheta, shpphi]:
for circle in piece.circles:
xcent, ycent = circle.center
rad = circle.radius
circ = plt.Circle((xcent, ycent), radius=rad, fc="none", ec=color,
linewidth=linewidth)
ax.add_artist(circ)
for segs in piece.segments:
xpts = np.array([p[0] for p in segs.points])
ypts = np.array([p[1] for p in segs.points])
ax.plot(xpts, ypts, linewidth=linewidth, color=color)
xtxt = center[0] - 2 * armwidth * cosarm
ytxt = center[1] - 2 * armwidth * sinarm
ax.text(xtxt, ytxt, "{}".format(loc),
color='k', fontsize=fontpt,
horizontalalignment='center',
verticalalignment='center',
bbox=None)
# bbox=dict(fc='w', ec='none', pad=1, alpha=1.0))
return
def plot_positioner_simple(ax, patrol_rad, loc, center, theta_ang, theta_arm,
phi_ang, phi_arm, color="k", linewidth=0.2):
"""Plot one fiber positioner.
This uses a simpler representation of the positioner geometry, in order to
speed up the plotting.
"""
set_matplotlib_pdf_backend()
patrol = plt.Circle((center[0], center[1]), radius=patrol_rad, fc=color,
ec="none", alpha=0.1)
ax.add_artist(patrol)
# Plot the arm from the center to the phi body
theta_x = theta_arm * np.cos(theta_ang) + center[0]
theta_y = theta_arm * np.sin(theta_ang) + center[1]
ax.plot([center[0], theta_x], [center[1], theta_y], color=color,
linewidth=5*linewidth)
# Plot the phi arm.
phi_x = phi_arm * np.cos(phi_ang + theta_ang) + theta_x
phi_y = phi_arm * np.sin(phi_ang + theta_ang) + theta_y
ax.plot([theta_x, phi_x], [theta_y, phi_y], color=color,
linewidth=linewidth)
fontpt = 2.0
xtxt = center[0]
ytxt = center[1] + 0.5
ax.text(xtxt, ytxt, "{}".format(loc),
color='k', fontsize=fontpt,
horizontalalignment='center',
verticalalignment='center',
bbox=None)
return
def plot_positioner_invalid(ax, patrol_rad, loc, center, color="k", linewidth=0.2,
fontpt=2.0):
"""Plot one fiber positioner which has invalid angles.
"""
set_matplotlib_pdf_backend()
patrol = plt.Circle((center[0], center[1]), radius=patrol_rad, fc=color,
ec="none", alpha=0.1)
ax.add_artist(patrol)
xtxt = center[0]
ytxt = center[1] + 0.5
ax.text(xtxt, ytxt, "{}".format(loc),
color='k', fontsize=fontpt,
horizontalalignment='center',
verticalalignment='center',
bbox=None)
return
def plot_tile_targets_props(hw, tagalong, tile_ra, tile_dec,
tile_obstime, tile_obstheta, tile_obsha,
tgs, avail_tgid=None):
if avail_tgid is None:
avail_tgid = tgs.ids()
ra, dec = tagalong.get_for_ids(avail_tgid, ['RA', 'DEC'])
color = list()
for idx, tgid in enumerate(avail_tgid):
tg = tgs.get(tgid)
color.append(plot_target_type_color(tg.type))
# We disable threading here, since it does not interact well with
# multiprocessing.
tgx,tgy = radec2xy(hw, tile_ra, tile_dec, tile_obstime, tile_obstheta, tile_obsha,
ra, dec, False, threads=1)
tgxy = list(zip(tgx,tgy))
props = {tgid: {"xy": xy, "color": cl} for tgid, xy, cl
in zip(avail_tgid, tgxy, color)}
return props
def plot_available(ax, targetprops, selected, linewidth=0.1):
mwidth = 5.0 * linewidth
xdata = np.full(len(selected), 9999.9, dtype=np.float64)
ydata = np.full(len(selected), 9999.9, dtype=np.float64)
color = list()
for idx, tgid in enumerate(selected):
xdata[idx] = targetprops[tgid]["xy"][0]
ydata[idx] = targetprops[tgid]["xy"][1]
color.append(targetprops[tgid]["color"])
ax.scatter(xdata, ydata, color=color, marker=".",
linewidth=linewidth, s=mwidth)
return
def plot_assignment(ax, hw, targetprops, tile_assigned, linewidth=0.1,
real_shapes=False, fontpt=2.):
log = Logger.get()
center_mm = hw.loc_pos_curved_mm
theta_arm = hw.loc_theta_arm
phi_arm = hw.loc_phi_arm
theta_offset = hw.loc_theta_offset
theta_min = hw.loc_theta_min
theta_max = hw.loc_theta_max
theta_pos = hw.loc_theta_pos
phi_offset = hw.loc_phi_offset
phi_min = hw.loc_phi_min
phi_max = hw.loc_phi_max
phi_pos = hw.loc_phi_pos
state = hw.state
loc_petal = dict(hw.loc_petal)
device_type = dict(hw.loc_device_type)
assigned = np.array(sorted(tile_assigned.keys()), dtype=np.int32)
# Plot GFA / Petal edges. Only plot one shape per petal, although
# the code formally allows unique petal / GFA boundaries per device.
if len(assigned) > 0:
edge_gfa = dict()
edge_petal = dict()
for loc in assigned:
pt = loc_petal[loc]
if pt not in edge_gfa:
edge_gfa[pt] = hw.loc_gfa_excl[loc]
edge_petal[pt] = hw.loc_petal_excl[loc]
for pt, shp in edge_gfa.items():
for segs in shp.segments:
xpts = np.array([p[0] for p in segs.points])
ypts = np.array([p[1] for p in segs.points])
ax.plot(xpts, ypts, linewidth=0.2*linewidth, color="gray")
for pt, shp in edge_petal.items():
for segs in shp.segments:
xpts = np.array([p[0] for p in segs.points])
ypts = np.array([p[1] for p in segs.points])
ax.plot(xpts, ypts, linewidth=0.2*linewidth, color="gray")
for lid in assigned:
color = "fuchsia"
if (device_type[lid] != "POS") and (device_type[lid] != "ETC"):
continue
shptheta = Shape()
shpphi = Shape()
theta = None
phi = None
center = center_mm[lid]
tgid = tile_assigned[lid][0]
is_stuck_sky = tile_assigned[lid][1]
patrol_rad = theta_arm[lid] + phi_arm[lid]
failed = False
is_assigned = (tgid >= 0)
if is_assigned:
# This fiber is assigned. Plot the positioner located at the
# assigned target.
failed = hw.loc_position_xy(lid, targetprops[tgid]["xy"],
shptheta, shpphi)
if failed:
msg = "Positioner at location {} cannot move to target {} at (x, y) = ({}, {}). This should have been dected during assignment!".format(lid, tgid, targetprops[tgid]["xy"][0], targetprops[tgid]["xy"][1])
log.warning(msg)
else:
color = targetprops[tgid]["color"]
theta, phi = hw.xy_to_thetaphi(
center, targetprops[tgid]["xy"],
theta_arm[lid], phi_arm[lid],
theta_offset[lid], phi_offset[lid],
theta_min[lid], phi_min[lid],
theta_max[lid], phi_max[lid],
)
else:
# This fiber is unassigned.
if (state[lid] & FIBER_STATE_STUCK) or (state[lid] & FIBER_STATE_BROKEN):
# The positioner is stuck or fiber broken. Plot it at its current
# location.
color = "gray"
if is_stuck_sky:
color = "cyan"
theta = theta_pos[lid] + theta_offset[lid]
                phi = phi_pos[lid] + phi_offset[lid]
msg = "Device location {}, state {} is stuck / broken, plotting fixed theta = {}, phi = {}".format(
lid, state[lid], theta, phi
)
log.debug(msg)
else:
# Plot the positioner in its home (parked) position
theta, phi = get_parked_thetaphi(theta_offset[lid],
theta_min[lid], theta_max[lid],
phi_offset[lid],
phi_min[lid], phi_max[lid])
msg = "Device location {}, state {} is unassigned, plotting parked theta = {}, phi = {}".format(
lid, state[lid], theta, phi
)
log.debug(msg)
failed = hw.loc_position_thetaphi(
lid, theta, phi, shptheta, shpphi, True
)
if failed:
msg = "Positioner at location {} cannot move to its stuck or home position. This should never happen!".format(lid)
log.warning(msg)
if failed:
plot_positioner_invalid(
ax, patrol_rad, lid, center, color=color, linewidth=linewidth,
fontpt=fontpt
)
else:
if real_shapes:
plot_positioner(
ax, patrol_rad, lid, center, shptheta, shpphi,
color=color, linewidth=linewidth, fontpt=fontpt
)
else:
plot_positioner_simple(
ax, patrol_rad, lid, center, theta, theta_arm[lid], phi,
phi_arm[lid], color=color, linewidth=linewidth
)
return
def plot_assignment_tile_file(petals, real_shapes, params):
(infile, outfile) = params
set_matplotlib_pdf_backend()
from matplotlib.patches import Patch
log = Logger.get()
if os.path.isfile(outfile):
log.info("Skipping existing plot {}".format(outfile))
return
else:
log.info("Creating {}".format(outfile))
header, fiber_data, targets_data, avail_data, gfa_data = \
read_assignment_fits_tile(infile)
tile_id = int(header["TILEID"])
tile_ra = float(header["TILERA"])
tile_dec = float(header["TILEDEC"])
tile_theta = float(header["FIELDROT"])
tile_obstime = header["FA_PLAN"]
tile_obsha = float(header["FA_HA"])
run_date = header["FA_RUN"]
# Retrieve additional margins to add to positioner, petal & gfa exclusion
# polygons from header cards.
margins = {}
for key in ['pos', 'gfa', 'petal']:
hdrkey = "FA_M_%s" % key[:3]
if hdrkey in header:
margins[key] = header[hdrkey]
log.debug('Read exclusion polygon margins from header: %s' % str(margins))
hw = load_hardware(rundate=run_date, add_margins=margins)
locs = None
if petals is None:
locs = [x for x in hw.locations]
else:
locs = list()
for p in petals:
locs.extend([x for x in hw.petal_locations[p]])
locs = np.array(locs)
tavail = avail_table_to_dict(avail_data)
log.debug(" tile {} at RA/DEC {} / {} with rotation {}".format(
tile_id, tile_ra, tile_dec, tile_theta)
)
fig = plt.figure(figsize=(12, 12))
ax = fig.add_subplot(1, 1, 1)
ax.set_aspect("equal")
# Target properties (x, y, color) for plotting
tgs = Targets()
tagalong = create_tagalong(plate_radec=False)
if "FA_SURV" in header:
load_target_table(tgs, tagalong, targets_data,
survey=str(header["FA_SURV"]).rstrip(),
typecol="FA_TYPE")
else:
load_target_table(tgs, tagalong, targets_data)
targetprops = plot_tile_targets_props(hw, tagalong,
tile_ra, tile_dec,
tile_obstime, tile_theta, tile_obsha,
tgs)
log.debug(" tile {} has {} targets with properties"
.format(tile_id, len(targets_data)))
# When plotting available targets, we only consider those which have
# RA/DEC information. Depending on how the assignment was written out,
# this might include only assigned targets or all targets available to
# the tile.
# Available targets for our selected fibers.
avtg_locs = [f for f in locs if f in tavail]
avtg_ids = np.unique([x for f in avtg_locs for x in tavail[f]])
# Downselect to include only targets with properties in the file.
avtg = avtg_ids[np.isin(avtg_ids, targets_data["TARGETID"],
assume_unique=True)]
plot_available(ax, targetprops, avtg, linewidth=0.1)
# Assigned targets for our selected fibers. We handle the special case of fibers
# being used as sky but not formally assigned to a target.
tassign = {
x["LOCATION"]: (x["TARGETID"], (x["FA_TYPE"] & TARGET_TYPE_SKY))
for x in fiber_data if (x["LOCATION"] in locs)
}
log.debug(" tile {} plotting {} assigned fibers"
.format(tile_id, len(tassign)))
fassign = {f: tassign[f] if f in tassign else (-1, False) for f in locs}
plot_assignment(
ax,
hw,
targetprops,
fassign,
linewidth=0.1,
real_shapes=real_shapes
)
ax.legend(
handles=[
Patch(color="red", label="Science"),
Patch(color="gold", label="Standard"),
Patch(color="green", label="Standard & Science"),
Patch(color="blue", label="Sky or Supp. Sky"),
Patch(color="black", label="Safe (BAD_SKY)"),
Patch(color="cyan", label="Stuck on Sky"),
Patch(color="gray", label="Stuck or Fiber Broken"),
Patch(color="fuchsia", label="Working & Unassigned"),
],
loc=1
)
ax.set_xlabel("Curved Focal Surface Millimeters", fontsize="large")
ax.set_ylabel("Curved Focal Surface Millimeters", fontsize="large")
plt.savefig(outfile, dpi=300, format="pdf")
plt.close()
return
def plot_tiles(files, out_dir=None, petals=None, real_shapes=False, serial=False):
"""Plot assignment output.
Args:
files (list): The list of fiberassign files.
out_dir (str): Output directory for plots.
petals (list): List of petals to plot.
real_shapes (bool): If True, plot the full positioner shapes.
serial (bool): If True, disable use of multiprocessing.
Returns:
None.
"""
log = Logger.get()
log.info("Plotting {} fiberassign tile files".format(len(files)))
plot_tile = partial(plot_assignment_tile_file, petals, real_shapes)
if (out_dir is not None) and not os.path.isdir(out_dir):
os.makedirs(out_dir)
file_map_list = list()
for f in files:
d, base = os.path.split(f)
if out_dir is not None:
d = out_dir
root = base.split(".")[0]
file_map_list.append((f, os.path.join(d, "{}.pdf".format(root))))
if serial:
for params in file_map_list:
plot_tile(params)
else:
with mp.Pool(processes=default_mp_proc) as pool:
pool.map(plot_tile, file_map_list)
return
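# Hedged usage sketch (added for illustration; the tile file name and output
# directory below are placeholders, not files shipped with this package):
#
#   from fiberassign.vis import plot_tiles
#   plot_tiles(["fiberassign-001000.fits"], out_dir="vis_plots",
#              petals=[0], real_shapes=False, serial=True)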
def plot_assignment_tile(hw, tgs, tagalong, tile_id, tile_ra, tile_dec,
tile_obstime, tile_theta, tile_obsha,
tile_assign, tile_avail=None, petals=None,
real_shapes=False, outfile=None, figsize=8):
set_matplotlib_pdf_backend()
# Get selected fibers
locs = None
if petals is None:
locs = [x for x in hw.locations]
else:
locs = list()
for p in petals:
locs.extend([x for x in hw.petal_locations[p]])
locs = np.array(locs)
# Available targets for our selected fibers.
avtg_locs = None
avtg_ids = None
if tile_avail is None:
# Just plot assigned targets
avtg_locs = [f for f in locs if f in tile_assign]
avtg_ids = [tile_assign[f] for f in avtg_locs]
else:
# Plot all available targets
avtg_locs = [f for f in locs if f in tile_avail]
avtg_ids = np.unique([x for f in avtg_locs for x in tile_avail[f]])
# Target properties
targetprops = plot_tile_targets_props(hw, tagalong, tile_ra, tile_dec,
tile_obstime, tile_theta, tile_obsha,
tgs, avail_tgid=avtg_ids)
fig = plt.figure(figsize=(figsize, figsize))
ax = fig.add_subplot(1, 1, 1)
ax.set_aspect("equal")
plot_available(ax, targetprops, avtg_ids, linewidth=0.1)
# Assigned targets for our selected fibers
tassign = {x: tile_assign[x] for x in locs if x in tile_assign}
fassign = {f: tassign[f] if f in tassign else -1 for f in locs}
plot_assignment(ax, hw, targetprops, fassign,
linewidth=0.1, real_shapes=real_shapes)
ax.set_xlabel("Curved Focal Surface Millimeters", fontsize="large")
ax.set_ylabel("Curved Focal Surface Millimeters", fontsize="large")
if outfile is None:
plt.show()
else:
plt.savefig(outfile, dpi=300, format="pdf")
plt.close()
return
def plot_qa_tile_color(desired, value, incr):
des_color = "green"
low_one_color = "gold"
low_two_color = "red"
low_color = "black"
high_color = "cyan"
if value == desired:
return des_color
if value > desired:
return high_color
if value < (desired - 2 * incr):
return low_color
if value < (desired - incr):
return low_two_color
return low_one_color
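# Illustrative mapping (added): with desired=400 and incr=2, a value of 401 maps to
# cyan, 400 to green, 399 or 398 to gold, 397 or 396 to red, and anything below 396 to black.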
def plot_qa(data, outroot, outformat="pdf", labels=False):
"""Make plots of QA data.
"""
set_matplotlib_pdf_backend()
# Imported here, to ensure that the backend has been set.
from matplotlib.patches import Patch
hw = load_hardware()
tile_radius = hw.focalplane_radius_deg
fontpt = 1
linewidth = 0.1
fig = plt.figure(figsize=(12, 10))
plot_param = [
("Total Fibers Assigned Per Tile", ["assign_total"], 5000, 5),
("Standards Assigned Per Tile", ["assign_std"], 100, 2),
("Sky Assigned Per Tile", ["assign_sky", "assign_suppsky"], 400, 2),
]
pindx = 1
for title, key, desired, incr in plot_param:
ax = fig.add_subplot(3, 1, pindx)
ax.set_aspect("equal")
xmin = 360.0
xmax = 0.0
ymin = 90.0
ymax = -90.0
for tid, props in data.items():
xcent = props["tile_ra"]
ycent = props["tile_dec"]
if xcent > xmax:
xmax = xcent
if xcent < xmin:
xmin = xcent
if ycent > ymax:
ymax = ycent
if ycent < ymin:
ymin = ycent
keytot = np.sum([props[x] for x in key])
color = plot_qa_tile_color(desired, keytot, incr)
circ = plt.Circle((xcent, ycent), radius=tile_radius, fc="none",
ec=color, linewidth=linewidth)
ax.add_artist(circ)
if labels:
ax.text(xcent, ycent, "{}".format(tid),
color=color, fontsize=fontpt,
horizontalalignment='center',
verticalalignment='center',
bbox=None)
margin = 1.1 * tile_radius
xmin -= margin
xmax += margin
ymin -= margin
ymax += margin
if xmin < 0.0:
xmin = 0.0
if xmax > 360.0:
xmax = 360.0
if ymin < -90.0:
ymin = -90.0
if ymax > 90.0:
ymax = 90.0
ax.set_xlim(left=xmin, right=xmax)
ax.set_ylim(bottom=ymin, top=ymax)
ax.set_xlabel("RA (degrees)", fontsize="large")
ax.set_ylabel("DEC (degrees)", fontsize="large")
ax.set_title(title)
c_high = plot_qa_tile_color(desired, desired+1, incr)
c_exact = plot_qa_tile_color(desired, desired, incr)
c_low_one = plot_qa_tile_color(desired, desired-incr, incr)
c_low_two = plot_qa_tile_color(desired, desired-2*incr, incr)
c_low = plot_qa_tile_color(desired, 0, incr)
c_low_two_val = desired - incr
c_low_val = desired - 2 * incr
legend_elements = [
Patch(facecolor=c_high, edgecolor="none",
label="> {} assigned".format(desired)),
Patch(facecolor=c_exact, edgecolor="none",
label="Exactly {} assigned".format(desired)),
Patch(facecolor=c_low_one, edgecolor="none",
label="< {} assigned".format(desired)),
Patch(facecolor=c_low_two, edgecolor="none",
label="< {} assigned".format(c_low_two_val)),
Patch(facecolor=c_low, edgecolor="none",
label="< {} assigned".format(c_low_val)),
]
ax.legend(handles=legend_elements, loc="best",
fontsize="x-small")
pindx += 1
outfile = "{}.{}".format(outroot, outformat)
plt.savefig(outfile, dpi=300, format="pdf")
return
| bsd-3-clause |
tcm129/trading-with-python | cookbook/getDataFromYahooFinance.py | 77 | 1391 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 16 18:37:23 2011
@author: jev
"""
from urllib import urlretrieve
from urllib2 import urlopen
from pandas import Index, DataFrame
from datetime import datetime
import matplotlib.pyplot as plt
sDate = (2005,1,1)
eDate = (2011,10,1)
symbol = 'SPY'
fName = symbol+'.csv'
try: # try to load saved csv file, otherwise get from the net
fid = open(fName)
lines = fid.readlines()
fid.close()
print 'Loaded from ' , fName
except Exception as e:
print e
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
print 'Downloading from ', urlStr
urlretrieve(urlStr,symbol+'.csv')
lines = urlopen(urlStr).readlines()
dates = []
data = [[] for i in range(6)]
#high
# header : Date,Open,High,Low,Close,Volume,Adj Close
for line in lines[1:]:
fields = line.rstrip().split(',')
dates.append(datetime.strptime( fields[0],'%Y-%m-%d'))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
idx = Index(dates)
data = dict(zip(['open','high','low','close','volume','adj_close'],data))
# create a pandas dataframe structure
df = DataFrame(data,index=idx).sort()
df.plot(secondary_y=['volume'])
| bsd-3-clause |
jabl/vasputil | vasputil/dos.py | 1 | 4093 | # -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8
# Copyright (c) 2008-2016 Janne Blomqvist
# This source code file is subject to the terms of the LGPL 2.1
# License. See the file LICENSE for details.
"""Module for doing stuff with density-of-states."""
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import sys
import warnings
class LDOS(object):
"""Class for representing a set of local DOS.
DOS data is stored in the instance variable self.dos, which is a 3D
ndarray, as follows:
1st dim: Which atom.
2nd dim: Selects the DOS grid point.
3rd dim: 0 is the energy, 1-3 s, p, d DOS.
"""
def __init__(self, doscar="DOSCAR", efermi=0.0):
"""Initialize LDOS."""
warnings.warn('vasputil.dos.LDOS is deprecated, please use \
ase.calculators.vasp.VaspDos instead, which is an improved version of this \
class', DeprecationWarning)
self._efermi = 0.0
self.read_doscar(doscar)
self.efermi = efermi
def read_doscar(self, fname="DOSCAR"):
"""Read a VASP DOSCAR file."""
f = open(fname)
natoms = int(f.readline().split()[0])
[f.readline() for nn in range(4)] # Skip next 4 lines.
dos = []
for na in xrange(natoms + 1):
try:
line = f.readline()
if line == "":
raise Exception
except Exception:
errstr = "Failed reading " + str(na) + ":th DOS block, probably " \
+ "this DOSCAR is from some old version of VASP that " \
+ "doesn't " \
+ "first produce a block with integrated DOS. Inserting " \
+ "empty 0:th block."
sys.stderr.write(errstr)
dos.insert(0, np.zeros((ndos, dos[1].shape[1])))
continue
try:
ndos = int(line.split()[2])
except Exception:
print("Error, line is: " + line + "ENDLINE")
line = f.readline().split()
cdos = np.zeros((ndos, len(line)))
cdos[0] = np.array(line)
for nd in xrange(1, ndos):
line = f.readline().split()
cdos[nd] = np.array(line)
dos.append(cdos)
f.close()
if dos[0].shape != dos[1].shape:
dos0 = np.zeros(dos[1].shape)
dos0[:,:dos[0].shape[1]] = dos[0]
dos[0] = dos0
self.dos = np.array(dos)
def _set_efermi(self, efermi):
"""Set the Fermi level."""
if self._efermi != 0.0:
self.dos[:,:,0] = self.dos[:,:,0] + self._efermi
self._efermi = efermi
self.dos[:,:,0] = self.dos[:,:,0] - efermi
def _get_efermi(self):
return self._efermi
def _del_efermi(self):
raise AttributeError("Can't delete attribute.")
efermi = property(_get_efermi, _set_efermi, _del_efermi, "Fermi energy.")
def get_energygrid(self):
"""Return the array with the energies."""
return self.dos[1, :, 0]
def get_dos(self, atom, orbital):
"""Return an NDOSx1 array with dos for the chosen atom and orbital.
If spin-unpolarized calculation, no phase factors:
s = 1, p = 2, d = 3
Spin-polarized, no phase factors:
s-up = 1, s-down = 2, p-up = 3, p-down = 4, d-up = 5, d-down = 6
If phase factors have been calculated, orbitals are
s, py, pz, px, dxy, dyz, dz2, dxz, dx2
double in the above fashion if spin polarized.
"""
return self.dos[atom, :, orbital]
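# Hedged usage sketch (added for illustration; the DOSCAR path and Fermi energy
# are placeholders):
#
#   ldos = LDOS(doscar='DOSCAR', efermi=5.0)
#   energies = ldos.get_energygrid()
#   s_dos = ldos.get_dos(1, 1)   # s-DOS of the first atom (spin-unpolarized case)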
def set_labels(fig=None):
"""Utility function to set default parameters for DOS plots."""
plt.xlabel("E-E$_\mathrm{f}$ (eV)")
plt.figtext(0., 0.35, "LDOS (arb. units)", rotation='vertical')
if fig is not None:
# Looks good in publication plots
fig.subplots_adjust(hspace=0.0, left=0.15, bottom=0.12)
else:
# This might be better for on-screen viewing
plt.subplots_adjust(hspace=0.0)
| lgpl-2.1 |
mikeireland/pynrm | pynrm/aoinstrument.py | 1 | 13805 | # -*- coding: utf-8 -*-
"""Useful utilities that are not telescope-dependent.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import astropy.io.fits as pyfits
import csv
import os
class AOInstrument:
"""The AOInstrument Class
"""
#A blank dictionary on startup.
csv_dict = dict()
#Blank reduction, cube. and data directories on startup.
rdir = ''
ddir = ''
cdir = ''
def read_summary_csv(self, filename='datainfo.csv',ddir=''):
"""Read the data from local file into a csv_dict structure.
Notes
-----
At the moment, all data types are strings. It would work better if
the second line of the CSV file was the data type.
"""
#Allow over-riding default data directory.
if (ddir == ''):
ddir = self.ddir
try:
f = open(ddir + filename)
except:
print ("Error: file doesn't exist " + ddir + filename)
raise UserWarning
r = csv.DictReader(f, delimiter=',')
#Read first line to initiate the dictionary
line = next(r)
d = dict()
for k in line.keys():
d[k] = [line[k]]
#Read the rest of the file
for line in r:
for k in line.keys():
d[k].append(line[k])
#Convert to numpy arrays
for k in line.keys():
d[k] = np.array(d[k])
f.close()
self.csv_dict = d
def make_all_darks(self, ddir='', rdir=''):
"""Make all darks in a current directory. This skeleton routine assumes that
keywords "SHRNAME", "NAXIS1" and "NAXIS2" exist.
"""
#Allow over-riding default reduction and data directories.
if (rdir == ''):
rdir = self.rdir
if (ddir == ''):
ddir = self.ddir
        darks = np.where(np.array(self.csv_dict['SHRNAME']) == 'closed')[0]
#Now we need to find unique values of the following:
#NAXIS1, NAXIS2 (plus for nirc2... ITIME, COADDS, MULTISAM)
codes = []
for d in darks:
codes.append(self.csv_dict['NAXIS1'][d] + self.csv_dict['NAXIS2'][d])
codes = np.array(codes)
#For each unique code, find all dark files and call make_dark.
for c in np.unique(codes):
w = np.where(codes == c)[0]
#Only bother if there are at least 3 files.
if (len(w) >= 3):
files = [ddir + self.csv_dict['FILENAME'][darks[ww]] for ww in w]
self.make_dark(files, rdir=rdir)
def make_dark(self,in_files, out_file='dark.fits', rdir=''):
"""This is a basic method to make a dark from several files. It is
generally expected to be over-ridden in derived classes.
Parameters
----------
in_files : array_like (dtype=string). A list if input filenames.
out_file: string
The file to write to.
"""
#Allow over-riding default reduction directory.
if (rdir == ''):
rdir = self.rdir
nf = len(in_files)
in_fits = pyfits.open(in_files[0], ignore_missing_end=True)
adark = in_fits[0].data
in_fits.close()
s = adark.shape
darks = np.zeros((nf,s[0],s[1]))
for i in range(nf):
#Read in the data, linearizing as a matter of principle, and also because
#this routine is used for
in_fits = pyfits.open(in_files[i], ignore_missing_end=True)
adark = in_fits[0].data
in_fits.close()
darks[i,:,:] = adark
med_dark = np.median(darks, axis=0)
pyfits.writeto(rdir + out_file, med_dark, output_verify='ignore')
def info_from_header(self, h):
"""Find important information from the fits header and store in a common format
Prototype function only - to be over-ridden in derived classes.
Parameters
----------
h: The fits header
Returns
-------
(dark_file, flat_file, filter, wave, rad_pixel)
"""
try: filter = h['FILTER']
except:
print ("No FILTER in header")
try: wave = h['WAVE']
except:
print ("No WAVE in header")
        try: rad_pixel = h['RAD_PIX']
except:
print ("No RAD_PIX in header")
try: targname = h['TARGET']
except:
print ("No TARGET in header")
return {'dark_file':'dark.fits', 'flat_file':'flat.fits', 'filter':filter,
'wave':wave, 'rad_pixel':rad_pixel,'targname':targname,'pupil_type':'circ','pupil_params':dict()}
def mod2pi(self,angle):
""" Convert an angle to the range (-pi,pi)
Parameters
----------
angle: float
input angle
Returns
-------
angle: float
output angle after the mod2pi operation
"""
return np.remainder(angle + np.pi,2*np.pi) - np.pi
def make_flat(self,in_files, rdir='', out_file='', dark_file='', wave=0.0):
"""Create a flat frame and save to a fits file,
with an attached bad pixel map as the first fits extension.
Parameters
----------
in_files : array_like (dtype=string). A list if input filenames.
dark_file: string
The dark file, previously created with make_dark
out_file: string
The file to write to
rdir: Over-writing the default reduction directory.
Returns
-------
Nothing.
"""
#Allow over-riding default reduction directory.
if (rdir == ''):
rdir = self.rdir
#Create a default flat filename from the input files
try:
in_fits = pyfits.open(in_files[0], ignore_missing_end=True)
except:
in_fits = pyfits.open(in_files[0]+'.gz', ignore_missing_end=True)
h = in_fits[0].header
hinfo = self.info_from_header(h)
if (out_file == ''):
out_file = hinfo['flat_file']
#We use the make_dark function to average our flats. NB we don't destripe.
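        # (Added note: the extra keyword arguments below assume the make_dark signature
        # of a derived instrument class; the base-class prototype above only accepts
        # in_files, out_file and rdir.)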
self.make_dark(in_files, rdir=rdir, out_file=out_file, subtract_median=False, destripe=False, med_threshold=15.0)
#Now extract the key parts.
h = pyfits.getheader(rdir + out_file)
#Add a wavelength to the header
h['WAVE'] = hinfo['wave']
if (dark_file ==''):
dark_file=self.get_dark_filename(h)
#FIXME: A hack if get_dark_filename returns a non existant file.
#FIXME: This should be incorporated into get_dark_filename if it is necessary, or
# otherwise give an error.
if not os.path.isfile(rdir + dark_file):
allDarks = [f for f in os.listdir(rdir) if 'dark' in f]
if 'EXPTIME' in h.keys():
exptime = h['EXPTIME']*100
elif 'ITIME' in h.keys():
exptime = h['ITIME']*100
allTimes = []
for ii in range(len(allDarks)):
count = 0
for jj in range(len(allDarks[ii])):
if allDarks[ii][jj]=='_':
count+=1
if count==2:
index = jj+1
allTimes.append(int(allDarks[ii][index:allDarks[ii].find('.fits')]))
allTimes = np.array(allTimes)
diffTimes = abs(allTimes-exptime)
dark_file = allDarks[np.argmin(diffTimes)]
#Subtract the dark and normalise the flat.
flat = pyfits.getdata(rdir + out_file,0) - pyfits.getdata(rdir + dark_file,0)
bad = np.logical_or(pyfits.getdata(rdir + out_file,1),pyfits.getdata(rdir + dark_file,1))
flat[np.where(bad)] = np.median(flat)
flat /= np.median(flat)
#Write this to a file
hl = pyfits.HDUList()
hl.append(pyfits.ImageHDU(flat,h))
hl.append(pyfits.ImageHDU(np.uint8(bad)))
hl.writeto(rdir + out_file,clobber=True)
plt.figure(1)
plt.imshow(flat, cmap=cm.gray, interpolation='nearest')
plt.title('Flat')
def fix_bad_pixels(self,im,bad,fmask):
"""Fix the bad pixels, using a Fourier technique that adapts to the
sampling of each particular pupil/filter combination.
Parameters
----------
im : (N,N) array (dtype=float)
An image, already chopped to the subarr x subarr size.
bad: (N,N) array (dtype=int)
A bad pixel map
fmask: (N,N) array (dtype=int)
A mask containing the region in the Fourier plane where there is
no expected signal.
Returns
-------
The image with bad pixel values optimally corrected.
"""
# fmask defines the null space of the image Fourier transform.
wft = np.where(fmask)
# Where the bad pixel array is non-zero.
w = np.where(bad)
# The bad matrix should map the bad pixels to the real and imaginary
# parts of the null space of the image Fourier transform
badmat = np.zeros((len(w[0]),len(wft[0])*2))
#print("Bad matrix shape: " + str(badmat.shape))
# Create a uv grid. Math should be correct here, but the second vector could be
# 2*np.pi*np.arange(im.shape[0])/float(im.shape[0]) and it would still work.
xy = np.meshgrid(2*np.pi*np.arange(im.shape[1]//2 + 1)/float(im.shape[1]),
2*np.pi*(((np.arange(im.shape[0]) + im.shape[0]//2) % im.shape[0]) - im.shape[0]//2)/float(im.shape[0]))
for i in range(len(w[0])):
# Avoiding the fft is marginally faster here...
bft = np.exp(-1j*(w[0][i]*xy[1] + w[1][i]*xy[0]))
badmat[i,:] = np.append(bft[wft].real, bft[wft].imag)
#A dodgy pseudo-inverse that needs an "invert" is faster than the la.pinv function
#Unless things are really screwed, the matrix shouldn't be singular.
hb = np.transpose(np.conj(badmat))
ibadmat = np.dot(hb,np.linalg.inv(np.dot(badmat,hb)))
#Now find the image Fourier transform on the "zero" region in the Fourier plane
#To minimise numerical errors, set the bad pixels to zero at the start.
im[w]=0
ftimz = (np.fft.rfft2(im))[wft]
# Now compute the bad pixel corrections. (NB a sanity check here is
# that the imaginary part really is 0)
addit = -np.real(np.dot(np.append(ftimz.real, ftimz.imag),ibadmat))
#FIXIT
#We would rather use linalg.solve than an inversion!
#addit2 =
#import pdb; pdb.set_trace()
# ibadmat = np.solve(
# plt.clf()
# plt.plot(np.real(np.dot(ftimz,ibadmat)), np.imag(np.dot(ftimz,ibadmat)))
# raise UserWarning
im[w] += addit
return im
def regrid_fft(self,im,new_shape, fmask=[]):
"""Regrid onto a larger number of pixels using an fft. This is optimal
for Nyquist sampled data.
Parameters
----------
im: array
The input image.
new_shape: (new_y,new_x)
The new shape
Notes
------
TODO: This should work with an arbitrary number of dimensions
"""
ftim = np.fft.rfft2(im)
if len(fmask) > 0:
ftim[np.where(fmask)] = 0
new_ftim = np.zeros((new_shape[0], new_shape[1]/2 + 1),dtype='complex')
new_ftim[0:ftim.shape[0]/2,0:ftim.shape[1]] = \
ftim[0:ftim.shape[0]/2,0:ftim.shape[1]]
new_ftim[new_shape[0]-ftim.shape[0]/2:,0:ftim.shape[1]] = \
ftim[ftim.shape[0]/2:,0:ftim.shape[1]]
return np.fft.irfft2(new_ftim)
def hexagon(self, dim, width):
"""This function creates a hexagon.
Parameters
----------
dim: int
Size of the 2D array
width: int
flat-to-flat width of the hexagon
Returns
-------
pupil: float array (sz,sz)
2D array hexagonal pupil mask
"""
x = np.arange(dim)-dim/2.0
xy = np.meshgrid(x,x)
xx = xy[1]
yy = xy[0]
w = np.where( (yy < width/2) * (yy > (-width/2)) * \
(yy < (width-np.sqrt(3)*xx)) * (yy > (-width+np.sqrt(3)*xx)) * \
(yy < (width+np.sqrt(3)*xx)) * (yy > (-width-np.sqrt(3)*xx)))
hex = np.zeros((dim,dim))
hex[w]=1.0
return hex
def rebin(self,a, shape):
"""Re-bins an image to a new (smaller) image with summing
Originally from:
http://stackoverflow.com/questions/8090229/resize-with-averaging-or-rebin-a-numpy-2d-array
Parameters
----------
a: array
Input image
shape: (xshape,yshape)
New shape
"""
sh = shape[0],a.shape[0]//shape[0],shape[1],a.shape[1]//shape[1]
return a.reshape(sh).sum(-1).sum(1)
def shift_and_ft(self,im, ftpix=(),regrid_factor=3):
"""Sub-pixel shift an image to the origin and Fourier-transform it
Parameters
----------
im: (ny,nx) float array
ftpix: optional ( (nphi) array, (nphi) array) of Fourier sampling points.
If included, the mean square Fourier phase will be minimised.
Returns
----------
ftim: (ny,nx/2+1) complex array
"""
ny = im.shape[0]
nx = im.shape[1]
if len(ftpix)==0:
#Regrid onto a finer array
im = self.regrid_fft(im,(regrid_factor*ny,regrid_factor*nx))
#Find the peak
shifts = np.unravel_index(im.argmax(), im.shape)
#Shift to the peak (noting that given we're about to rebin... we need to offset
#by half of the regrid_factor)
im = np.roll(np.roll(im,-shifts[0]+regrid_factor//2,axis=0),-shifts[1]+regrid_factor//2,axis=1)
#Rebin and FFT!
im = self.rebin(im,(ny,nx))
ftim = np.fft.rfft2(im)
else:
#Shift to the origin within a pixel and Fourier transform
shifts = np.unravel_index(im.argmax(), im.shape)
im = np.roll(np.roll(im,-shifts[0]+1,axis=0),-shifts[1]+1,axis=1)
ftim = np.fft.rfft2(im)
#Find the Fourier phase
arg_ftpix = np.angle(ftim[ftpix])
#Compute phase in radians per Fourier pixel
vcoord = ((ftpix[0] + ny/2) % ny)- ny/2
ucoord = ftpix[1]
#Project onto the phase ramp in each direction, and remove this phase ramp
#from the data.
vphase = np.sum(arg_ftpix * vcoord)/np.sum(vcoord**2)
uphase = np.sum(arg_ftpix * ucoord)/np.sum(ucoord**2)
uv = np.meshgrid(np.arange(nx/2 + 1), ((np.arange(ny) + ny/2) % ny) - ny/2 )
ftim = ftim*np.exp(-1j * uv[0] * uphase - 1j*uv[1]*vphase)
return ftim
| mit |
perryjohnson/biplaneblade | sandia_blade_lib/plot_selected_stations.py | 1 | 1184 | """Plot structural parts for selected stations in the Sandia blade.
Usage
-----
start an IPython (qt)console with the pylab flag:
$ ipython qtconsole --pylab
or
$ ipython --pylab
Then, from the prompt, run this script:
|> %run sandia_blade_lib/plot_selected_stations
Once you are finished looking at the meshes, you can clean up extra files:
|> %run clean
(See the 'clean.py' script in this directory for details.)
Author: Perry Roth-Johnson
Last updated: April 14, 2014
"""
import matplotlib.pyplot as plt
import numpy as np
import lib.blade as bl
reload(bl)
# load the sandia blade
m = bl.MonoplaneBlade('Sandia blade SNL100-00', 'sandia_blade')
# pre-process the airfoil coordinates
for station in m.list_of_stations:
station.airfoil.create_polygon()
station.structure.create_all_layers()
station.structure.save_all_layer_edges()
# station.structure.create_all_alternate_layers()
# station.structure.write_truegrid_inputfile(interrupt_flag=True)
station.structure.write_all_part_polygons()
m.plot_selected_cross_sections()
# for station in m.list_of_stations:
# station.plot_parts(alternate_layers=False)
| gpl-3.0 |
DamienIrving/ocean-analysis | visualisation/plot_drift.py | 1 | 9514 | """
Filename: plot_drift.py
Author: Damien Irving, [email protected]
Description: Visualise the de-drifting process
"""
# Import general Python modules
import sys
import os
import re
import pdb
import argparse
import numpy
import matplotlib.pyplot as plt
import iris
import cf_units
import cmdline_provenance as cmdprov
# Import my modules
cwd = os.getcwd()
repo_dir = '/'
for directory in cwd.split('/')[1:]:
repo_dir = os.path.join(repo_dir, directory)
if directory == 'ocean-analysis':
break
modules_dir = os.path.join(repo_dir, 'modules')
sys.path.append(modules_dir)
processing_dir = os.path.join(repo_dir, 'data_processing')
sys.path.append(processing_dir)
import timeseries
import remove_drift
import general_io as gio
# Define functions
def get_title(infile, var, index_list):
"""Get the plot title."""
cube = iris.load_cube(infile, gio.check_iris_var(var))
title = ''
coord_names = [coord.name() for coord in cube.dim_coords]
for posnum, index in enumerate(index_list):
point_name = coord_names[posnum + 1]
point_value = cube.coord(point_name).points[index]
title = f"{title} {point_name}: {point_value};"
return title
def select_point(cube, index_list, timeseries=False):
"""Select a given grid point."""
if timeseries:
assert len(index_list) == cube.ndim - 1
else:
assert len(index_list) == cube.ndim
if len(index_list) == 1:
a = index_list[0]
point = cube[:, a] if timeseries else cube[a]
elif len(index_list) == 2:
a, b = index_list
point = cube[:, a, b] if timeseries else cube[a, b]
elif len(index_list) == 3:
a, b, c = index_list
point = cube[:, a, b, c] if timeseries else cube[a, b, c]
return point
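# (Added usage note, illustrative: for a (time, depth, lat, lon) cube,
# select_point(cube, [0, 58, 35], timeseries=True) keeps the time axis and picks
# index 0 on the first non-time dimension, 58 on the second and 35 on the third.)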
def read_data(file_list, var, grid_point, convert_to_annual=False):
"""Read input data."""
cube, history = gio.combine_files(file_list, var)
if grid_point:
cube = select_point(cube, grid_point, timeseries=True)
if convert_to_annual:
cube = timeseries.convert_to_annual(cube)
return cube, history[0]
def cubic_fit(infile, grid_point, time_axis):
"""Get the cubic polynomial."""
a_cube = iris.load_cube(infile, 'coefficient a')
a_cube = select_point(a_cube, grid_point)
b_cube = iris.load_cube(infile, 'coefficient b')
b_cube = select_point(b_cube, grid_point)
c_cube = iris.load_cube(infile, 'coefficient c')
c_cube = select_point(c_cube, grid_point)
d_cube = iris.load_cube(infile, 'coefficient d')
d_cube = select_point(d_cube, grid_point)
numpy_poly = numpy.poly1d([float(d_cube.data),
float(c_cube.data),
float(b_cube.data),
float(a_cube.data)])
cubic_data = numpy_poly(time_axis)
return cubic_data, a_cube
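# (Added note: numpy.poly1d expects coefficients in order of decreasing power, so the
# polynomial evaluated above is a + b*t + c*t**2 + d*t**3 over the control-run time axis.)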
def main(inargs):
"""Run the program."""
metadata_dict = {}
# Read data
control_cube, control_history = read_data(inargs.control_files, inargs.variable,
inargs.grid_point, convert_to_annual=inargs.annual)
metadata_dict[inargs.control_files[0]] = control_history
coord_names = [coord.name() for coord in control_cube.dim_coords]
time_var = coord_names[0]
assert time_var in ['time', 'year']
experiment_cube, experiment_history = read_data(inargs.experiment_files, inargs.variable,
inargs.grid_point, convert_to_annual=inargs.annual)
metadata_dict[inargs.experiment_files[0]] = experiment_history
if inargs.dedrifted_files:
dedrifted_cube, dedrifted_history = read_data(inargs.dedrifted_files, inargs.variable,
inargs.grid_point, convert_to_annual=inargs.annual)
metadata_dict[inargs.dedrifted_files[0]] = dedrifted_history
if inargs.coefficient_file:
cubic_data, a_cube = cubic_fit(inargs.coefficient_file, inargs.grid_point,
control_cube.coord(time_var).points)
#TODO: coeff metadata
# Time axis adjustment
if time_var == 'time':
first_data_cube = iris.load_cube(inargs.experiment_files[0], gio.check_iris_var(inargs.variable))
if inargs.grid_point:
first_data_cube = select_point(first_data_cube, inargs.grid_point, timeseries=True)
if inargs.annual:
first_data_cube = timeseries.convert_to_annual(first_data_cube)
timescale = 'annual'
offset = 182.5
else:
timescale = 'monthly'
offset = 15.5
time_diff, branch_time, new_time_unit = remove_drift.time_adjustment(first_data_cube, control_cube, timescale,
branch_time=inargs.branch_time)
print(f'branch time: {branch_time - offset}')
time_coord = experiment_cube.coord('time')
time_coord.convert_units(new_time_unit)
experiment_time_values = time_coord.points.astype(numpy.float32) - time_diff
elif time_var == 'year':
if not inargs.branch_year == None:
branch_year = inargs.branch_year
else:
if not inargs.control_time_units:
control_time_units = gio.fix_time_descriptor(experiment_cube.attributes['parent_time_units'])
else:
control_time_units = inargs.control_time_units.replace("_", " ")
branch_time = experiment_cube.attributes['branch_time_in_parent']
branch_datetime = cf_units.num2date(branch_time, control_time_units, cf_units.CALENDAR_STANDARD)
branch_year = branch_datetime.year
print(f'branch year: {branch_year}')
experiment_time_values = numpy.arange(branch_year, branch_year + experiment_cube.shape[0])
# Plot
fig = plt.figure(figsize=[14, 7])
plt.plot(control_cube.coord(time_var).points, control_cube.data, label='control')
plt.plot(experiment_time_values, experiment_cube.data, label='experiment')
if inargs.dedrifted_files:
plt.plot(experiment_time_values, dedrifted_cube.data, label='dedrifted')
if inargs.coefficient_file:
plt.plot(control_cube.coord(time_var).points, cubic_data, label='cubic fit')
if inargs.outlier_threshold:
data, outlier_idx = timeseries.outlier_removal(control_cube.data, inargs.outlier_threshold)
plt.plot(control_cube.coord(time_var).points[outlier_idx], control_cube.data[outlier_idx],
marker='o', linestyle='none', color='r', alpha=0.3)
if inargs.ylim:
ymin, ymax = inargs.ylim
plt.ylim(ymin, ymax)
plt.ylabel(f"{gio.check_iris_var(inargs.variable)} ({control_cube.units})")
if time_var == 'time':
plt.xlabel(str(new_time_unit))
else:
plt.xlabel('control run year')
plt.legend()
if inargs.grid_point:
title = get_title(inargs.control_files[0], inargs.variable, inargs.grid_point)
plt.title(title)
# Save output
plt.savefig(inargs.outfile, bbox_inches='tight')
log_text = cmdprov.new_log(infile_history=metadata_dict, git_repo=repo_dir)
log_file = re.sub('.png', '.met', inargs.outfile)
cmdprov.write_log(log_file, log_text)
if __name__ == '__main__':
extra_info ="""
author:
Damien Irving, [email protected]
"""
description = 'Visualise the de-drifting process'
parser = argparse.ArgumentParser(description=description,
epilog=extra_info,
argument_default=argparse.SUPPRESS,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("variable", type=str, help="Variable")
parser.add_argument("outfile", type=str, help="Output file name")
parser.add_argument("--control_files", nargs='*', type=str, required=True,
help="control data files")
parser.add_argument("--experiment_files", nargs='*', type=str, required=True,
help="experiment data files")
parser.add_argument("--coefficient_file", type=str, default=None,
help="Drift coefficient file name")
parser.add_argument("--dedrifted_files", nargs='*', type=str, default=None,
help="dedrifted experiment data files")
parser.add_argument("--outlier_threshold", type=float, default=None,
help="Indicate points that were removed from control in drift calculation [default: None]")
parser.add_argument("--grid_point", type=int, nargs='*', default=None,
help="Array indexes for grid point to plot (e.g. 0 58 35)")
parser.add_argument("--ylim", type=float, nargs=2, metavar=('MIN', 'MAX'), default=None,
help="limits for y axis")
parser.add_argument("--branch_year", type=int, default=None, help="override metadata")
parser.add_argument("--branch_time", type=float, default=None, help="override metadata")
parser.add_argument("--control_time_units", type=str, default=None, help="override metadata (e.g. days_since_1850-01-01)")
parser.add_argument("--annual", action="store_true", default=False,
help="Apply annual smoothing [default=False]")
args = parser.parse_args()
main(args)
| mit |
luckyharryji/physical-modeling-band | push/moving.py | 1 | 3366 | import csv
import matplotlib.animation as animation
'''
timestamp: data[11]
linear acceleration x, y, z: data[12], data[13], data[14]
'''
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def randrange(n, vmin, vmax):
return (vmax-vmin)*np.random.rand(n) + vmin
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# n = 100
# for c, m, zl, zh in [('r', 'o', -50, -25), ('b', '^', -30, -5)]:
# xs = randrange(n, 23, 32)
# ys = randrange(n, 0, 100)
# zs = randrange(n, zl, zh)
# ax.scatter(xs, ys, zs, c=c, marker=m)
# ax.set_xlabel('X Label')
# ax.set_ylabel('Y Label')
# ax.set_zlabel('Z Label')
#
# plt.show()
with open('signal1_data.csv','rb') as f_in:
reader = csv.reader(f_in)
raw_data = list(reader)
# print data[0]
for index,data in enumerate(raw_data):
if index==0:
continue
for index2,k in enumerate(data):
raw_data[index][index2]=float(raw_data[index][index2])
v_x=0
v_y=0
v_z=0
s_x=0
s_y=0
s_z=0
sports_result=list()
sports_x=[]
sports_y=[]
sports_z=[]
moving_time=[]
a_x=[]
a_y=[]
a_z=[]
v_x_list=[]
v_y_list=[]
v_z_list=[]
for index,data in enumerate(raw_data):
if index==0 or index==1:
continue
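    # (Added note: the block below integrates the logged acceleration assuming it is
    # constant over each sample interval dt, using the previous sample's value, i.e.
    # s' = s + v*dt + 0.5*a*dt**2 and v' = v + a*dt for every axis.)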
time=data[11] if index==1 else data[11]-raw_data[index-1][11]
time_x=s_x+v_x*time+(0.5)*raw_data[index-1][12]*(time**2)
time_y=s_y+v_y*time+(0.5)*raw_data[index-1][13]*(time**2)
time_z=s_z+v_z*time+(0.5)*raw_data[index-1][14]*(time**2)
sports_result.append([data[11],time_x,time_y,time_z])
a_x.append(raw_data[index-1][12])
a_y.append(raw_data[index-1][13])
a_z.append(raw_data[index-1][14])
v_x_list.append(v_x)
v_y_list.append(v_y)
v_z_list.append(v_z)
sports_x.append(time_x*10)
sports_y.append(time_y*10)
sports_z.append(time_z*10)
moving_time.append(data[11])
# if time_x<s_x or time_y<s_y or time_z<s_z:
# print "yes"
s_x=time_x
s_y=time_y
s_z=time_z
v_x+=raw_data[index-1][12]*time
v_y+=raw_data[index-1][13]*time
v_z+=raw_data[index-1][14]*time
# v_x=0
# v_y=0
# v_z=0
# print moving_time
def data_gen():
t = data_gen.t
cnt = 0
while cnt < len(sports_x):
cnt+=1
t += 0.05
print "postion of x, y, z: ",sports_x[cnt],sports_y[cnt],sports_z[cnt]
print "time: ",moving_time[cnt]
print "x accrleration: ",a_x[cnt]," y accrleration: ",a_y[cnt]," z accrleration: ",a_z[cnt]
print "x speed: ",v_x_list[cnt]," y speed: ",v_y_list[cnt]," z speed: ",v_z_list[cnt]
yield sports_x[cnt],sports_y[cnt],sports_z[cnt]
print "end"
data_gen.t = 0
xdata=[]
ydata=[]
zdata=[]
def run(data):
# update the data
x,y,z = data
# xdata.append(x)
# ydata.append(y)
# zdata.append(z)
# line.set_data(xdata, ydata)
ax.scatter([x],[y],[z],color='blue')
# ani = animation.FuncAnimation(fig, run, data_gen, blit=False, interval=1,
# repeat=False)
# ax.set_xlabel('X Label')
# ax.set_ylabel('Y Label')
# ax.set_zlabel('Z Label')
# plt.show()
ax.scatter(sports_x, sports_y, sports_z,color='blue')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()
# print "end"
# print sports_result
# import json
# out_put={'data':sports_result}
#
# with open('moving_track_1.json','wb') as f_out:
# f_out.write(json.dumps(out_put))
| mit |
ghchinoy/tensorflow | tensorflow/contrib/learn/python/learn/grid_search_test.py | 137 | 2035 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grid search tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
from tensorflow.contrib.learn.python import learn
from tensorflow.python.platform import test
HAS_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if HAS_SKLEARN:
try:
# pylint: disable=g-import-not-at-top
from sklearn import datasets
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
except ImportError:
HAS_SKLEARN = False
class GridSearchTest(test.TestCase):
"""Grid search tests."""
def testIrisDNN(self):
if HAS_SKLEARN:
random.seed(42)
iris = datasets.load_iris()
feature_columns = learn.infer_real_valued_columns_from_input(iris.data)
classifier = learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3)
grid_search = GridSearchCV(
classifier, {'hidden_units': [[5, 5], [10, 10]]},
scoring='accuracy',
fit_params={'steps': [50]})
grid_search.fit(iris.data, iris.target)
score = accuracy_score(iris.target, grid_search.predict(iris.data))
self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
if __name__ == '__main__':
test.main()
| apache-2.0 |
aorzh/easy-tracker | start.py | 1 | 7349 | from os.path import expanduser, exists
from os import makedirs
import argparse
import time
from sqlalchemy import create_engine, and_
from models import Task, Base
from datetime import timedelta, datetime
from sqlalchemy.orm import sessionmaker
import re
import smartchart
home_dir = expanduser('~')
tracker_dir = home_dir + '/easy_tracker'
tracker_db = home_dir + '/easy_tracker/lite.db'
def main():
parser = argparse.ArgumentParser()
parser.add_argument('init', nargs='?', help="Can be init, start, stop, report, add-time, remove, graf")
parser.add_argument('--category', '-c', help='Category like JIRA or Work or Private etc.')
parser.add_argument('--task', '-t', help='Task short description')
parser.add_argument('--days', '-d', help='How many days (optional for reports)')
parser.add_argument('--date_from', '-f', help='Start date. Format should be: dd-mm-yyyy')
parser.add_argument('--time_spent', '-s', help='Time spent. Format: XhYm')
parser.add_argument('--task_id', '-i', help='Id of task which need remove')
"""
TODO: add remove (by id)
TODO: add statistics (pandas etc.)
"""
namespace = parser.parse_args()
if namespace.init == 'start':
if namespace.task is None or namespace.category is None:
print('Please add category and task. Use start --category="Your category" --task="Your task"')
else:
start(namespace.category, namespace.task)
if namespace.init == 'init':
init_tracker()
if namespace.init == 'stop':
stop()
if namespace.init == 'add-time':
if namespace.task is None or namespace.category is None or namespace.date_from is None \
or namespace.time_spent is None:
print('Missed some required arguments. Use --help for details')
else:
add_time(namespace.date_from, namespace.time_spent, namespace.category, namespace.task)
if namespace.init == 'report':
category = ''
task = ''
days = 0
if namespace.category:
category = namespace.category
if namespace.task:
task = namespace.task
if namespace.days:
days = namespace.days
report(**{'category': category, 'days': days, 'task': task})
if namespace.init == 'remove':
if namespace.task_id is None:
print('Do you know the task ID to remove? Try running a report for additional information')
else:
remove_log(namespace.task_id)
if namespace.init == 'graf':
smartchart.graf()
def init_tracker():
if not exists(tracker_dir):
makedirs(tracker_dir)
engine = create_engine("sqlite:///" + tracker_db, echo=False)
Base.metadata.create_all(engine)
print('Created new database in %s. Run tracker with --help option for more information' % tracker_db)
def start(category, issue):
if validate_init() is True:
now = int(time.time())
engine = create_engine("sqlite:///" + tracker_db, echo=False)
Session = sessionmaker(bind=engine)
session = Session()
n = session.query(Task).order_by(Task.id.desc()).first()
if n is not None:
if n.start == n.stop:
print("Please stop last task first. Use stop command")
else:
task = Task(category, issue, now, now)
session.add(task)
session.commit()
session.close()
print('You working on task: %s ' % issue)
else:
task = Task(category, issue, now, now)
session.add(task)
session.commit()
session.close()
print('You working on task: %s ' % issue)
def stop():
if validate_init() is True:
now = int(time.time())
engine = create_engine("sqlite:///" + tracker_db, echo=False)
Session = sessionmaker(bind=engine)
session = Session()
n = session.query(Task).order_by(Task.id.desc()).first()
session.query(Task).filter(Task.id == n.id).update({'stop': now})
print(n.name + " stopped!")
session.commit()
session.close()
def remove_log(task_id):
if validate_init() is True:
engine = create_engine("sqlite:///" + tracker_db, echo=False)
Session = sessionmaker(bind=engine)
session = Session()
n = session.query(Task)
n.filter(Task.id == task_id).delete()
session.commit()
session.close()
def report(**kwargs):
if validate_init() is True:
engine = create_engine("sqlite:///" + tracker_db, echo=False)
Session = sessionmaker(bind=engine)
session = Session()
n = session.query(Task)
for key in kwargs:
current_time = datetime.utcnow()
if key == 'days' and kwargs[key] != '':
days_ago = current_time - timedelta(days=int(kwargs[key]))
n = n.filter(and_(Task.start >= int(datetime.timestamp(days_ago))))
if key == 'category' and kwargs[key] != '':
n = n.filter(and_(Task.category == kwargs[key]))
if key == 'task' and kwargs[key] != '':
n = n.filter(and_(Task.name == kwargs[key]))
n = n.all()
if not n:
print('Cannot find anything with these parameters. Returning all.')
n = session.query(Task).all()
total = timedelta(seconds=0)
template = "{0:2}|{1:15}|{2:65}|{3:15}|{4:20}|"
print(template.format('Id', 'Category', 'Name', 'Spent hours', 'Date'))
for i in n:
diff = (i.stop - i.start)
td = timedelta(seconds=diff)
total = total + td
d = datetime.fromtimestamp(i.start)
print(template.format(i.id, i.category, i.name, str(td), str(d)))
print('*****************')
print('Total: %s (hh:mm:ss)' % total)
def add_time(date_from, time_spent, category, issue):
if validate_init() is True:
engine = create_engine("sqlite:///" + tracker_db, echo=False)
Session = sessionmaker(bind=engine)
session = Session()
now = int(time.time())
try:
dt = int(time.mktime(datetime.strptime(date_from, "%d-%m-%Y").timetuple()))
if dt >= now:
print('Date should be in the past.')
return False
except ValueError:
print('Wrong date format. Use --help for additional information')
return False
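# Parse the spent time; the expected format is "XhYm", e.g. "1h30m" -> 90 minutes.
# If the hour part ("...h") or the minute part ("h...m") does not match,
# that component falls back to 0 (so e.g. "45m" currently yields 0 minutes).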
try:
result = re.search('(.*)h', time_spent)
hours = result.group(1)
except AttributeError:
hours = 0
try:
result = re.search('h(.*)m', time_spent)
minutes = result.group(1)
except AttributeError:
minutes = 0
delta_minutes = int(hours) * 60 + int(minutes)
stop_date = dt + delta_minutes * 60
task = Task(category, issue, dt, stop_date)
session.add(task)
session.commit()
session.close()
print('You added time for task: %s ' % issue)
def validate_init():
if not exists(tracker_db):
print('Seems like you are using this tracker for the first time. Please run the "init" command first')
return False
else:
return True
if __name__ == '__main__':
main()
| gpl-3.0 |
barnabuskev/wiicop | GetCOPparams.py | 1 | 9350 | #!/usr/bin/env python3
# software to convert raw centre of pressure (COP) data into various COP based measures of balance
# IMPORTS
# ~~~~~~~
import tkinter.filedialog as tk_fd
import sys
import os
import re
import pickle
import numpy as np
import pandas as pd
import configparser
import matplotlib.pyplot as plt
import matplotlib
import COPparamsFs as cp
# %matplotlib inline
#import pdb; pdb.set_trace()
# INITIALISE
# set regular expression to find calibration file
cal_re = "calib.*dat"
# set regular expression to find cop data file
cop_re = "subj.*.dat"
# specify list of cop parameters
cop_params = ['pred_ellipse','path_length','velocity']
# string that signifies subject code
sbjstr = 'subj'
# Display stabilogram flag
disps_f = False
# Save stabilogram flag
saves_f = True
# name of image directory
imdir = "stabilograms"
# Balance board dimensions width and length in mm (Leach, J.M., Mancini, M., Peterka, R.J., Hayes,
# T.L. and Horak, F.B., 2014. Validating and calibrating the Nintendo Wii
# balance board to derive reliable center of pressure measures. Sensors,
# 14(10), pp.18244-18267.)
BB_Y = 238
BB_X = 433
# filter parameters...
# cutoff frequency as proportion of Nyquist
cutoff = 2/3
# order of Butterworth filter
order = 4
# set up if plots flagged
if disps_f or saves_f:
matplotlib.rcParams['toolbar'] = 'None'
# create empty list to store stuff to plot
plt_lst = []
# create figure and axis
fig,ax = plt.subplots(1)
fig.canvas.set_window_title('Stabilogram')
# setup regular expression objects
cal_re_o = re.compile(cal_re)
cop_re_o = re.compile(cop_re)
# create empty pandas dataframes to store calibration and cop data
cal_df=pd.DataFrame(columns=['session','sensor','slope','slope.se','r.coef','p-val'])
# change to $XDG_RUNTIME_DIR/gvfs where samba mounts its shares
# gvfs_pth = os.environ['XDG_RUNTIME_DIR']+'/gvfs/'
# os.chdir(os.path.dirname(gvfs_pth))
# Get user to choose config file
config_file = tk_fd.askopenfilename(title = 'Get config file for study',filetypes=[('Data files', '*.config'), ('All files','*')])
# move to directory above config file
os.chdir(os.path.dirname(config_file))
os.chdir("..")
# read selected config file
config = configparser.ConfigParser()
config.read(config_file)
# get info list of factors
fct_lst = config.options('factors')
cop_df=pd.DataFrame(columns=['session','subj'] + fct_lst + cop_params)
# SEARCH THROUGH CHOSEN DIRECTORY STRUCTURE
# for data and calibration files
seshd = tk_fd.askdirectory(title = 'Open study directory containing sessions')
if not seshd:
sys.exit()
for root, dirs, files in os.walk(seshd):
# for each directory
if len(files) > 0:
# look for calibration file using reg expression
c_lst = list(filter(cal_re_o.match,files))
if c_lst:
# if list c_lst is not empty..
# should contain only one calibration file
assert len(c_lst)==1, "more than one calibration file in directory: {}".format(root)
# open calibration file
c_pth = os.path.join(root,c_lst[0])
with open(c_pth,'rb') as fptr:
tmp = pickle.load(fptr, fix_imports=False)
# Store 1 row of calibration data...
cal_dat = tmp['details']
nxt_cal = len(cal_df.index)
s_ind = 0
for sns in cal_dat.keys():
# for each sensor..
# create empty row
cal_df.loc[nxt_cal+s_ind] = None
cal_df.ix[nxt_cal+s_ind,'session'] = os.path.basename(root)
cal_df.ix[nxt_cal+s_ind,'sensor'] = sns
cal_df.ix[nxt_cal+s_ind,'slope'] = cal_dat[sns]['m']
cal_df.ix[nxt_cal+s_ind,'slope.se'] = cal_dat[sns]['se']
cal_df.ix[nxt_cal+s_ind,'r.coef'] = cal_dat[sns]['r']
cal_df.ix[nxt_cal+s_ind,'p-val'] = cal_dat[sns]['p']
s_ind+=1
# look for cop data file using reg expression
d_lst = list(filter(cop_re_o.match,files))
if d_lst:
for fi in d_lst:
# for each data file..
# store row of study metadata...
nxt_cop = len(cop_df.index)
# create empty row
cop_df.loc[nxt_cop] = None
# strip extension
prts = fi.split('.')
# save levels as list
lev_lst = prts[0].split('_')
# get subject code
tmp = [s for s in lev_lst if sbjstr in s]
scode = tmp[0].strip(sbjstr)
# convert to int then back to string with 3 leading zeros
scode = int(scode)
scode = str(scode).zfill(3)
cop_df.ix[nxt_cop,'subj'] = scode
# get session
cop_df.ix[nxt_cop,'session'] = os.path.basename(root)
# read study metadata into df
for fct_i in fct_lst:
for lev in lev_lst:
if lev in config['factors'][fct_i]:
cop_df.ix[nxt_cop,fct_i] = lev
# read COP data
d_pth = os.path.join(root,fi)
with open(d_pth,'rb') as fptr:
pkl = pickle.load(fptr)
# convert time data into seconds
t_dat = pkl['timedat']
t_dat = t_dat[:,0] + t_dat[:,1]/1000000
# reshape t_dat so that it is an nd array with one singleton dimension
t_dat = np.reshape(t_dat,(t_dat.size,-1))
# add time data to cop data
cop_dat = np.concatenate((pkl['cop'],t_dat), axis=1)
# Preprocess COP data
# resample to even sample points
cop_dat_r = cp.resamp(cop_dat)
# low pass filtering
cop_dat_f = cp.bfilt(cop_dat_r, cutoff, order)
# # TEST ***
# # plot coronal
# fg, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
# # resampled or raw
# ax1.plot(cop_dat[:,2],cop_dat[:,0],'bo-')
# # ax1.plot(t,cor,'bo-')
# ax1.set_xlabel('time (secs)')
# ax1.set_ylabel('coronal plane')
# ax1.set_title('Raw data')
# ax1.grid()
# # resampled and filtered
# ax2.plot(cop_dat_f[:,2],cop_dat_f[:,0],'bo-')
# ax2.set_xlabel('time (secs)')
# ax2.set_title('Resampled and filtered')
# ax2.grid()
# mng = plt.get_current_fig_manager()
# mng.resize(*mng.window.maxsize())
# plt.show()
# # ***
# Get COP parameters...
# 95% Prediction interval
cop_df.ix[nxt_cop,'pred_ellipse'] = cp.PI95(cop_dat_f)
# path length
pl = cp.pathl(cop_dat)
cop_df.ix[nxt_cop,'path_length'] = pl
# velocity
# import pdb; pdb.set_trace()
cop_df.ix[nxt_cop,'velocity'] = pl/int(cop_df.ix[nxt_cop,'acq_time'])
# store data for plotting if flagged
if disps_f or saves_f:
# create a dictionary of relevant study factors
std_fct = {}
std_fct['subj'] = scode
for fct_i in fct_lst:
for lev in lev_lst:
if lev in config['factors'][fct_i]:
std_fct[fct_i] = lev
plt_lst.append([[cop_dat, std_fct]])
# Plot stabilograms (see Scoppa2013) if flagged
if disps_f or saves_f:
if saves_f:
# create image directory if it doesn't exist in current working directory
imdirpth = os.path.join(os.getcwd(),imdir)
if not(os.path.isdir(imdirpth)):
os.mkdir(imdirpth)
ax.axis([-BB_X/2, BB_X/2, -BB_Y/2, BB_Y/2])
ax.grid()
plot1 = True
plt.ion()
for ia in plt_lst:
if plot1:
line_h, = ax.plot(ia[0][0][:,0], ia[0][0][:,1],'b-')
plot1 = False
else:
line_h.set_xdata(ia[0][0][:,0])
line_h.set_ydata(ia[0][0][:,1])
plt.title('Subject: '+ia[0][1]['subj'])
# get factors string
fct_str = ""
for ib in fct_lst:
fct_str = fct_str + ib + ": " + ia[0][1][ib] + ", "
fct_str = fct_str.rstrip(", ")
txt_h = plt.text(-200,100,fct_str, fontsize=14)
if disps_f:
plt.show()
plt.pause(0.5)
if saves_f:
# create file name
im_file = ".png"
for ib in fct_lst:
im_file = ia[0][1][ib] + im_file
im_file = "subj" + ia[0][1]['subj'] + "_" + im_file
plt.savefig(os.path.join(os.getcwd(),imdir,im_file))
txt_h.remove()
# create results directory if it doesn't exist
res_dir = os.path.join(seshd,'results')
if not(os.path.isdir(res_dir)):
os.mkdir(res_dir)
# write calibration results
calib_file = os.path.join(res_dir,'calib_results.csv')
cal_df.to_csv(calib_file)
# write study results
study_file = os.path.join(res_dir,'study_results.csv')
cop_df.to_csv(study_file)
| gpl-3.0 |
GaloMALDONADO/HQP | hqp/multi_contact/utils.py | 1 | 28351 | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 1 16:54:39 2016
@author: adelpret
"""
from hqp.polytope_conversion_utils import crossMatrix, cone_span_to_face, eliminate_redundant_inequalities
from hqp.geom_utils import is_vector_inside_cone, plot_inequalities
from hqp.transformations import euler_matrix
import hqp.plot_utils as plut
import matplotlib.pyplot as plt
from math import atan, pi
import numpy as np
from numpy.linalg import norm
from numpy.random import uniform
from numpy import sqrt
import cProfile
EPS = 1e-5;
def compute_centroidal_cone_generators(contact_points, contact_normals, mu):
''' Compute two matrices. The first one contains the 6d generators of the
centroidal cone. The second one contains the 3d generators of the
contact cones (4 generators for each contact point).
'''
assert contact_points.shape[1]==3, "Wrong size of contact_points"
assert contact_normals.shape[1]==3, "Wrong size of contact_normals"
assert contact_points.shape[0]==contact_normals.shape[0], "Size of contact_points and contact_normals do not match"
contact_points = np.asarray(contact_points);
contact_normals = np.asarray(contact_normals);
nContacts = contact_points.shape[0];
cg = 4; # number of generators for each friction cone
nGen = nContacts*cg; # total number of generators
if(isinstance(mu, (list, tuple, np.ndarray))):
mu = np.asarray(mu).squeeze();
else:
mu = mu*np.ones(nContacts);
G = np.zeros((3,nGen));
A = np.zeros((6,nGen));
P = np.zeros((6,3));
P[:3,:] = -np.identity(3);
muu = mu/sqrt(2.0);
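# The 4 generators per contact have tangential components (+/-mu/sqrt(2), +/-mu/sqrt(2)),
# whose norm is exactly mu, so they lie on the friction cone boundary and the
# spanned 4-sided pyramid is an inner (conservative) linearization of the cone.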
for i in range(nContacts):
# compute tangent directions
contact_normals[i,:] = contact_normals[i,:]/norm(contact_normals[i,:]);
T1 = np.cross(contact_normals[i,:], [0.,1.,0.]);
if(norm(T1)<EPS):
T1 = np.cross(contact_normals[i,:], [1.,0.,0.]);
T1 = T1/norm(T1);
T2 = np.cross(contact_normals[i,:], T1);
G[:,cg*i+0] = muu[i]*T1 + muu[i]*T2 + contact_normals[i,:];
G[:,cg*i+1] = muu[i]*T1 - muu[i]*T2 + contact_normals[i,:];
G[:,cg*i+2] = -muu[i]*T1 + muu[i]*T2 + contact_normals[i,:];
G[:,cg*i+3] = -muu[i]*T1 - muu[i]*T2 + contact_normals[i,:];
# compute matrix mapping contact forces to gravito-inertial wrench
P[3:,:] = -crossMatrix(contact_points[i,:]);
# project generators in 6d centroidal space
A[:,cg*i:cg*i+cg] = np.dot(P, G[:,cg*i:cg*i+cg]);
# normalize generators
for i in range(nGen):
G[:,i] /= norm(G[:,i]);
A[:,i] /= norm(A[:,i]);
return (A, G);
def find_static_equilibrium_com(mass, com_lb, com_ub, H, h, MAX_ITER=1000):
''' Find a position of the center of mass that is in static equilibrium.'''
FOUND_STATIC_COM = False;
g_vector = np.array([0,0,-9.81]);
w = np.zeros(6);
w[2] = -mass*9.81;
i = 0;
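# Rejection sampling: draw random CoM positions within the given bounds until the
# gravity wrench w = (m*g, m*c x g) satisfies the GIWC inequalities H*w <= h,
# i.e. until the sampled CoM is in static equilibrium.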
while(not FOUND_STATIC_COM):
#STEVE: consider actual bounds on contact points
c0 = np.array([np.random.uniform(com_lb[j], com_ub[j]) for j in range(3)]);
w[3:] = mass*np.cross(c0, g_vector);
FOUND_STATIC_COM = np.max(np.dot(H, w) - h) <= 1e-12;
i += 1;
if(i>=MAX_ITER):
# print "ERROR: Could not find com position in static equilibrium in %d iterations."%MAX_ITER;
return (False,c0);
return (True,c0);
def generate_rectangle_contacts(lx, ly, pos, rpy):
''' Generate the 4 contact points and the associated normal directions
for a rectangular contact.
'''
# contact points in local frame
p = np.array([[ lx, ly, 0],
[ lx, -ly, 0],
[-lx, -ly, 0],
[-lx, ly, 0]]);
# normal direction in local frame
n = np.array([0, 0, 1]);
# compute rotation matrix
R = euler_matrix(rpy[0], rpy[1], rpy[2], 'sxyz');
R = R[:3,:3];
# contact points in world frame
p[0,:] = pos.reshape(3) + np.dot(R,p[0,:]);
p[1,:] = pos.reshape(3) + np.dot(R,p[1,:]);
p[2,:] = pos.reshape(3) + np.dot(R,p[2,:]);
p[3,:] = pos.reshape(3) + np.dot(R,p[3,:]);
# normal directions in world frame
n = np.dot(R,n);
N = np.vstack([n, n, n, n]);
return (p,N);
def generate_contacts(N_CONTACTS, lx, ly, mu, CONTACT_POINT_LOWER_BOUNDS, CONTACT_POINT_UPPER_BOUNDS,
RPY_LOWER_BOUNDS, RPY_UPPER_BOUNDS, MIN_CONTACT_DISTANCE, GENERATE_QUASI_FLAT_CONTACTS=False):
''' Generate the contact points and the contact normals associated to randomly
generated rectangular contact surfaces.
'''
contact_pos = np.zeros((N_CONTACTS, 3));
contact_rpy = np.zeros((N_CONTACTS, 3));
p = np.zeros((4*N_CONTACTS,3)); # contact points
N = np.zeros((4*N_CONTACTS,3)); # contact normals
g_vector = np.array([0, 0, -9.81]);
# Generate contact positions and orientations
for i in range(N_CONTACTS):
while True:
contact_pos[i,:] = uniform(CONTACT_POINT_LOWER_BOUNDS, CONTACT_POINT_UPPER_BOUNDS); # contact position
collision = False;
for j in range(i-1):
if(np.linalg.norm(contact_pos[i,:]-contact_pos[j,:])<MIN_CONTACT_DISTANCE):
collision = True;
if(not collision):
break;
while True:
contact_rpy[i,:] = uniform(RPY_LOWER_BOUNDS, RPY_UPPER_BOUNDS); # contact orientation
(p[i*4:i*4+4,:],N[i*4:i*4+4,:]) = generate_rectangle_contacts(lx, ly, contact_pos[i,:], contact_rpy[i,:].T);
if(GENERATE_QUASI_FLAT_CONTACTS==False or is_vector_inside_cone(-g_vector, mu, N[i*4,:].T)):
break;
return (p, N);
def compute_GIWC(contact_points, contact_normals, mu, eliminate_redundancies=False, USE_DIAGONAL_GENERATORS=True):
''' Compute the gravito-inertial wrench cone (i.e. the centroidal cone).
@param contact_points Nx3 matrix containing the contact points
@param contact_normals Nx3 matrix containing the contact normals
@param mu A scalar friction coefficient, or an array of friction coefficients (one for each contact point)
@param cg Number of generators for the friction cone of each contact point
@param USE_DIAGONAL_GENERATORS If True generate the generators turned of 45 degrees
@return (H,h) Matrix and vector defining the GIWC as H*w <= h
'''
(S_centr, S) = compute_centroidal_cone_generators(contact_points, contact_normals, mu);
# convert generators to inequalities
H = cone_span_to_face(S_centr,eliminate_redundancies);
h = np.zeros(H.shape[0]);
return (H,h);
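# Example (illustrative sketch): GIWC of two point contacts with vertical
# normals and friction coefficient 0.5; the feasible wrenches satisfy H*w <= h,
# with h = 0 since the cone has its apex at the origin:
# p = np.array([[0.1, 0.0, 0.0], [-0.1, 0.0, 0.0]])
# N = np.array([[0.0, 0.0, 1.0], [0.0, 0.0, 1.0]])
# (H, h) = compute_GIWC(p, N, 0.5)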
def compute_support_polygon(H, h, mass, g_vector, eliminate_redundancies=False):
''' Compute the 2d support polygon A*c<=b given the gravito-inertial wrench
cone (GIWC) as H*w <= h.
Project inequalities from 6d to 2d x-y com space.
The com wrench to maintain static equilibrium is an affine function
of the com position c:
w = D*c+d
Substituting this into the GIWC inequalities we get:
H*w <= h
H*D*c + H*d <= h
H*D*c <= h - H*d
'''
D = np.zeros((6,3));
d = np.zeros(6);
D[3:,:] = -mass*crossMatrix(g_vector);
d[:3] = mass*g_vector;
A = np.dot(H, D[:,:2]);
b = h - np.dot(H, d);
if(eliminate_redundancies):
(A,b) = eliminate_redundant_inequalities(A,b);
return (A,b);
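# Example (illustrative sketch): given (H, h) from compute_GIWC, the projected
# polygon acts as a static-equilibrium test on the CoM x-y position:
# (A, b) = compute_support_polygon(H, h, mass, np.array([0., 0., -9.81]))
# in_equilibrium = (np.dot(A, c0[:2]) <= b).all()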
def compute_com_acceleration_polytope(com_pos, H, h, mass, g_vector, eliminate_redundancies=False):
''' Compute the inequalities A*x<=b defining the polytope of feasible CoM accelerations
assuming zero rate of change of angular momentum.
@param c0 Current com position
@param H Matrix of GIWC, which can be computed by compute_GIWC
@param h Vector of GIWC, which can be computed by compute_GIWC
@param mass Mass of the system in Kg
@param g_vector Gravity vector
@return (A,b)
'''
K = np.zeros((6,3));
K[:3,:] = mass*np.identity(3);
K[3:,:] = mass*crossMatrix(com_pos);
b = h - np.dot(H,np.dot(K,g_vector)); #constant term of F
A = np.dot(-H,K); #matrix multiplying com acceleration
if(eliminate_redundancies):
(A,b) = eliminate_redundant_inequalities(A,b);
return A,b;
def can_I_stop(c0, dc0, contact_points, contact_normals, mu, mass, T_0, MAX_ITER=1000, DO_PLOTS=False, verb=0,
eliminate_redundancies=False):
''' Determine whether the system can come to a stop without changing contacts.
Keyword arguments:
c0 -- initial CoM position
dc0 -- initial CoM velocity
contact points -- a matrix containing the contact points
contact normals -- a matrix containing the contact normals
mu -- friction coefficient (either a scalar or an array)
mass -- the robot mass
T_0 -- an initial guess for the time to stop
Output: (is_stable, c_final, dc_final), where:
is_stable -- boolean value
c_final -- final com position
dc_final -- final com velocity
'''
# Steps:
# - Compute GIWC: H*w <= h, where w=(m*(g-ddc), m*cx(g-ddc))
# - Project GIWC in (alpha,DDalpha) space, where c=c0+alpha*v, ddc=-DDalpha*v, v=dc0/||dc0||: A*(alpha,DDalpha)<=b
# - Find ordered (left-most first, clockwise) vertices of 2d polytope A*(alpha,DDalpha)<=b: V
# - Initialize: alpha=0, Dalpha=||dc0||
# - LOOP:
# - Find current active inequality: a*alpha + b*DDalpha <= d (i.e. DDalpha upper bound for current alpha value)
# - If DDalpha_upper_bound<0: return False
# - Find alpha_max (i.e. value of alpha corresponding to right vertex of active inequality)
# - Initialize: t=T_0, t_ub=10, t_lb=0
# LOOP:
# - Integrate LDS until t: DDalpha = d/b - (a/d)*alpha
# - if(Dalpha(t)==0 && alpha(t)<=alpha_max): return (True, alpha(t)*v)
# - if(alpha(t)==alpha_max && Dalpha(t)>0): alpha=alpha(t), Dalpha=Dalpha(t), break
# - if(alpha(t)<alpha_max && Dalpha>0): t_lb=t, t=(t_ub+t_lb)/2
# - else t_ub=t, t=(t_ub+t_lb)/2
assert mass>0.0, "Mass is not positive"
assert T_0>0.0, "Time is not positive"
assert mu>0.0, "Friction coefficient is not positive"
c0 = np.asarray(c0).squeeze();
dc0 = np.asarray(dc0).squeeze();
contact_points = np.asarray(contact_points);
contact_normals = np.asarray(contact_normals);
assert c0.shape[0]==3, "Com position has not size 3"
assert dc0.shape[0]==3, "Com velocity has not size 3"
assert contact_points.shape[1]==3, "Contact points have not size 3"
assert contact_normals.shape[1]==3, "Contact normals have not size 3"
assert contact_points.shape[0]==contact_normals.shape[0], "Number of contact points and contact normals do not match"
g_vector = np.array([0,0,-9.81]);
(H,h) = compute_GIWC(contact_points, contact_normals, mu, eliminate_redundancies=eliminate_redundancies);
# If initial com velocity is zero then test static equilibrium
if(np.linalg.norm(dc0) < EPS):
w = np.zeros(6);
w[2] = -mass*9.81;
w[3:] = mass*np.cross(c0, g_vector);
if(np.max(np.dot(H, w) - h) < EPS):
return (True, c0, dc0);
return (False, c0, dc0);
# Project GIWC in (alpha,DDalpha) space, where c=c0+alpha*v, ddc=DDalpha*v, v=dc0/||dc0||: a*alpha + b*DDalpha <= d
v = dc0/np.linalg.norm(dc0);
K = np.zeros((6,3));
K[:3,:] = mass*np.identity(3);
K[3:,:] = mass*crossMatrix(c0);
d = h - np.dot(H, np.dot(K,g_vector));
b = np.dot(np.dot(-H,K),v); # vector multiplying com acceleration
tmp = np.array([[0,0,0,0,1,0], # tmp maps the direction v to the wrench direction
[0,0,0,-1,0,0], # [0, 0, 0, -v_y, v_x, 0]^T, i.e. the angular part of (v x g)/9.81
[0,0,0,0,0,0]]).T;
a = mass*9.81*np.dot(np.dot(H,tmp),v);
if(DO_PLOTS):
range_plot = 10;
ax = plot_inequalities(np.vstack([a,b]).T, d, [-range_plot,range_plot], [-range_plot,range_plot]);
plt.axis([0,range_plot,-range_plot,0])
plt.title('Feasible com pos-acc');
plut.movePlotSpines(ax, [0, 0]);
ax.set_xlabel('com pos');
ax.set_ylabel('com acc');
plt.show();
# Eliminate redundant inequalities
if(eliminate_redundancies):
A_red, d = eliminate_redundant_inequalities(np.vstack([a,b]).T, d);
a = A_red[:,0];
b = A_red[:,1];
# Normalize inequalities to have unitary coefficients for DDalpha: b*DDalpha <= d - a*alpha
for i in range(a.shape[0]):
if(abs(b[i]) > EPS):
a[i] /= abs(b[i]);
d[i] /= abs(b[i]);
b[i] /= abs(b[i]);
elif(verb>0):
print "WARNING: cannot normalize %d-th inequality because coefficient of DDalpha is almost zero"%i, b[i];
# Initialize: alpha=0, Dalpha=||dc0||
alpha = 0;
Dalpha = np.linalg.norm(dc0);
#sort b indices to only keep negative values
negative_ids = np.where(b<0)[0];
if(negative_ids.shape[0]==0):
# CoM acceleration is unbounded
return (True, c0, 0.0*v);
for iiii in range(MAX_ITER):
# Find current active inequality: b*DDalpha <= d - a*alpha (i.e. DDalpha lower bound for current alpha value)
a_alpha_d = a*alpha-d;
a_alpha_d_negative_bs = a_alpha_d[negative_ids];
(i_DDalpha_min, DDalpha_min) = [(i,a_min) for (i, a_min) in [(j, a_alpha_d[j]) for j in negative_ids] if (a_min >= a_alpha_d_negative_bs).all()][0]
if(verb>0):
print "DDalpha_min", DDalpha_min;
print "i_DDalpha_min", i_DDalpha_min;
# If DDalpha_lower_bound>0: return False
if(DDalpha_min >= -EPS):
if(verb>0):
print "Algorithm converged because DDalpha is positive";
return (False, c0+alpha*v, Dalpha*v);
# Find alpha_max (i.e. value of alpha corresponding to right vertex of active inequality)
den = b*a[i_DDalpha_min] + a;
i_pos = np.where(den>0)[0];
if(i_pos.shape[0]==0):
# print "WARNING b*a_i0+a is never positive, that means that alpha_max is unbounded";
alpha_max = 10.0;
else:
alpha_max = np.min((d[i_pos] + b[i_pos]*d[i_DDalpha_min])/den[i_pos]);
if(verb>0):
print "alpha_max", alpha_max;
if(alpha_max<alpha):
# We reach the right limit of the polytope of feasible com pos-acc.
# This means there is no feasible com acc for farther com position (with zero angular momentum derivative)
return (False, c0+alpha*v, Dalpha*v);
# If DDalpha is not always negative on the current segment then update alpha_max to the point
# where the current segment intersects the x axis
if( a[i_DDalpha_min]*alpha_max - d[i_DDalpha_min] > 0.0):
alpha_max = d[i_DDalpha_min] / a[i_DDalpha_min];
if(verb>0):
print "Updated alpha_max", alpha_max;
# Initialize: t=T_0, t_ub=10, t_lb=0
t = T_0;
t_ub = 10;
t_lb = 0;
if(abs(a[i_DDalpha_min])>EPS):
omega = sqrt(a[i_DDalpha_min]+0j);
if(verb>0):
print "omega", omega;
else:
# if the acceleration is constant over time I can compute directly the time needed
# to bring the velocity to zero:
# Dalpha(t) = Dalpha(0) + t*DDalpha = 0
# t = -Dalpha(0)/DDalpha
t = Dalpha / d[i_DDalpha_min];
alpha_t = alpha + t*Dalpha - 0.5*t*t*d[i_DDalpha_min];
if(alpha_t <= alpha_max+EPS):
if(verb>0):
print "DDalpha_min is independent from alpha, algorithm converged to Dalpha=0";
return (True, c0+alpha_t*v, 0.0*v);
# if alpha reaches alpha_max before the velocity is zero, then compute the time needed to reach alpha_max
# alpha(t) = alpha(0) + t*Dalpha(0) + 0.5*t*t*DDalpha = alpha_max
# t = (- Dalpha(0) +/- sqrt(Dalpha(0)^2 - 2*DDalpha(alpha(0)-alpha_max))) / DDalpha;
# where DDalpha = -d[i_DDalpha_min]
# Having two solutions, we take the smallest one because we want to find the first time
# at which alpha reaches alpha_max
delta = sqrt(Dalpha**2 + 2*d[i_DDalpha_min]*(alpha-alpha_max))
t = ( Dalpha - delta) / d[i_DDalpha_min];
if(t<0.0):
# If the smallest time at which alpha reaches alpha_max is negative print a WARNING because this should not happen
print "WARNING: Time is less than zero:", t, alpha, Dalpha, d[i_DDalpha_min], alpha_max;
t = (Dalpha + delta) / d[i_DDalpha_min];
if(t<0.0):
# If also the largest time is negative print an ERROR and return
print "ERROR: Time is still less than zero:", t, alpha, Dalpha, d[i_DDalpha_min], alpha_max;
return (False, c0+alpha*v, Dalpha*v);
bisection_converged = False;
for jjjj in range(MAX_ITER):
# Integrate LDS until t: DDalpha = a*alpha - d
if(abs(a[i_DDalpha_min])>EPS):
# if a=0 then the acceleration is a linear function of the position and I need to use this formula to integrate
sh = np.sinh(omega*t);
ch = np.cosh(omega*t);
alpha_t = ch*alpha + sh*Dalpha/omega + (1-ch)*(d[i_DDalpha_min]/a[i_DDalpha_min]);
Dalpha_t = omega*sh*alpha + ch*Dalpha - omega*sh*(d[i_DDalpha_min]/a[i_DDalpha_min]);
else:
# if a=0 then the acceleration is constant and I need to use this formula to integrate
alpha_t = alpha + t*Dalpha - 0.5*t*t*d[i_DDalpha_min];
Dalpha_t = Dalpha - t*d[i_DDalpha_min];
if(np.imag(alpha_t) != 0.0):
print "ERROR alpha is imaginary", alpha_t;
return (False, c0+alpha*v, Dalpha*v);
if(np.imag(Dalpha_t) != 0.0):
print "ERROR Dalpha is imaginary", Dalpha_t
return (False, c0+alpha*v, Dalpha*v);
alpha_t = np.real(alpha_t);
Dalpha_t = np.real(Dalpha_t);
if(verb>0):
print "Bisection iter",jjjj,"alpha",alpha_t,"Dalpha",Dalpha_t,"t", t
if(abs(Dalpha_t)<EPS and alpha_t <= alpha_max+EPS):
if(verb>0):
print "Algorithm converged to Dalpha=0";
return (True, c0+alpha_t*v, Dalpha_t*v);
if(abs(alpha_t-alpha_max)<EPS and Dalpha_t>0):
alpha = alpha_max+EPS;
Dalpha = Dalpha_t;
bisection_converged = True;
break;
if(alpha_t<alpha_max and Dalpha_t>0):
t_lb=t;
t=(t_ub+t_lb)/2;
else:
t_ub=t;
t=(t_ub+t_lb)/2;
if(not bisection_converged):
print "ERROR: Bisection search did not converge in %d iterations"%MAX_ITER;
return (False, c0+alpha*v, Dalpha*v);
print "ERROR: Numerical integration did not converge in %d iterations"%MAX_ITER;
if(DO_PLOTS):
plt.show();
return (False, c0+alpha*v, Dalpha*v);
''' PROBLEM 2: MULTI-CONTACT CAPTURE POINT (what's the max vel in a given direction such that I can stop)
Input: initial CoM position c0, initial CoM velocity direction v, contact points CP, friction coefficient mu, T_0=1
Output: The (norm of the) max initial CoM velocity such that I can stop without changing contacts is
a piece-wise linear function of the CoM position (on the given line), which is returned as a
list P of 2D points in the space where:
- x is the CoM offset relatively to c0 along v (i.e. c=c0+x*v)
- y is the maximum CoM velocity along v
Steps:
- Compute GIWC: H*w <= h, where w=(m*(g-ddc), m*cx(g-ddc))
- Project GIWC in (alpha,DDalpha) space, where c=c0+alpha*v, ddc=-DDalpha*v: A*(alpha,DDalpha)<=b
- Find ordered (right-most first, counter clockwise) vertices of 2d polytope A*(alpha,DDalpha)<=b: V
- if(line passing through c0 in direction v does not intersect static-equilibrium polytope): return []
- Find extremum of static-equilibrium polytope in direction v: c1
- Initialize: alpha=||c1-c0||, Dalpha=0
- Initialize: P=[(alpha,0)]
- LOOP:
- Find current active inequality: a*alpha + b*DDalpha <= d (i.e. DDalpha upper bound for current alpha value)
- Find alpha_min (i.e. value of alpha corresponding to left vertex of active inequality)
- Initialize: t=-T_0, t_ub=-10, t_lb=0
LOOP:
- Integrate backward in time LDS until t: DDalpha = d/b - (a/d)*alpha
- if(alpha(t)==alpha_min): alpha=alpha(t), Dalpha=Dalpha(t)
P = [(alpha_min,Dalpha)]+P
break
- if(alpha(t)<alpha_min): t_ub=t, t=(t_ub+t_lb)/2
- else: t_lb=t, t=(t_ub+t_lb)/2
- if(alpha_min<=0): return P
- if(left vertex of active inequality is left-most vertex): return P
PROBLEM 3: VELOCITY PROPAGATION WITH CONTACT TRANSITION
Input: initial CoM pos c0, initial CoM vel dc0, final CoM pos c2, initial contacts CP0, final contacts CP2,
friction coefficient mu, min CoM distance d_min to travel in CP1
Assumptions:
- CoM path is a straight line from c0 to c2.
- CP0 and CP2 contains the same number of contacts N, out of which N-1 contacts must be exactly the same.
Output: Min and max feasible final CoM velocity (in norm) c2_min, c2_max
Steps:
- Define CP1 as the intersection of CP0 and CP2
- Define alpha as: c=c0+alpha*v, ddc=DDalpha*v
- Define v=(c2-c0)/||c2-c0||
- Compute the GIWCs associated to CP0, CP1, CP2: H^i*w <= h^i, where w=(m*(g-ddc), m*cx(g-ddc)), i=0,1,2
- Project GIWCs Ci in (alpha,DDalpha) space: A^i*(alpha,DDalpha)<=b^i
- Compute intersection between C0 and C1 (C01), and C1 and C2 (C12)
- If C01 contains zero
'''
def test():
DO_PLOTS = False;
PLOT_3D = False;
mass = 75; # mass of the robot
mu = 0.5; # friction coefficient
lx = 0.1; # half foot size in x direction
ly = 0.07; # half foot size in y direction
USE_DIAGONAL_GENERATORS = True;
GENERATE_QUASI_FLAT_CONTACTS = True;
#First, generate a contact configuration
CONTACT_POINT_UPPER_BOUNDS = [ 0.5, 0.5, 0.0];
CONTACT_POINT_LOWER_BOUNDS = [-0.5, -0.5, 0.0];
gamma = atan(mu); # half friction cone angle
RPY_LOWER_BOUNDS = [-0*gamma, -0*gamma, -pi];
RPY_UPPER_BOUNDS = [+0*gamma, +0*gamma, +pi];
MIN_CONTACT_DISTANCE = 0.3;
N_CONTACTS = 2
READ_CONTACTS_FROM_FILE = True;
X_MARG = 0.07;
Y_MARG = 0.07;
if(READ_CONTACTS_FROM_FILE):
import pickle
f = open("./data.pkl", 'rb');
res = pickle.load(f);
f.close();
# (p, N) = generate_contacts(N_CONTACTS, lx, ly, mu, CONTACT_POINT_LOWER_BOUNDS, CONTACT_POINT_UPPER_BOUNDS, RPY_LOWER_BOUNDS, RPY_UPPER_BOUNDS, MIN_CONTACT_DISTANCE, GENERATE_QUASI_FLAT_CONTACTS);
p = res['contact_points'].T;
N = res['contact_normals'].T;
print "Contact points\n", p;
print "Contact normals\n", 1e3*N
X_LB = np.min(p[:,0]-X_MARG);
X_UB = np.max(p[:,0]+X_MARG);
Y_LB = np.min(p[:,1]-Y_MARG);
Y_UB = np.max(p[:,1]+Y_MARG);
Z_LB = np.min(p[:,2]-0.05);
Z_UB = np.max(p[:,2]+1.5);
(H,h) = compute_GIWC(p, N, mu, False, USE_DIAGONAL_GENERATORS);
(succeeded, c0) = find_static_equilibrium_com(mass, [X_LB, Y_LB, Z_LB], [X_UB, Y_UB, Z_UB], H, h);
if(not succeeded):
print "Impossible to find a static equilibrium CoM position with the contacts read from file";
return
else:
succeeded = False;
while(succeeded == False):
(p, N) = generate_contacts(N_CONTACTS, lx, ly, mu, CONTACT_POINT_LOWER_BOUNDS, CONTACT_POINT_UPPER_BOUNDS, RPY_LOWER_BOUNDS, RPY_UPPER_BOUNDS, MIN_CONTACT_DISTANCE, GENERATE_QUASI_FLAT_CONTACTS);
X_LB = np.min(p[:,0]-X_MARG);
X_UB = np.max(p[:,0]+X_MARG);
Y_LB = np.min(p[:,1]-Y_MARG);
Y_UB = np.max(p[:,1]+Y_MARG);
Z_LB = np.min(p[:,2]-0.05);
Z_UB = np.max(p[:,2]+1.5);
(H,h) = compute_GIWC(p, N, mu, False, USE_DIAGONAL_GENERATORS);
(succeeded, c0) = find_static_equilibrium_com(mass, [X_LB, Y_LB, Z_LB], [X_UB, Y_UB, Z_UB], H, h);
dc0 = np.random.uniform(-1, 1, size=3);
dc0[2] = 0;
if(DO_PLOTS):
f, ax = plut.create_empty_figure();
for j in range(p.shape[0]):
ax.scatter(p[j,0], p[j,1], c='k', s=100);
ax.scatter(c0[0], c0[1], c='r', s=100);
com_x = np.zeros(2);
com_y = np.zeros(2);
com_x[0] = c0[0];
com_y[0] = c0[1];
com_x[1] = c0[0]+dc0[0];
com_y[1] = c0[1]+dc0[1];
ax.plot(com_x, com_y, color='b');
plt.axis([X_LB,X_UB,Y_LB,Y_UB]);
plt.title('Contact Points and CoM position');
if(PLOT_3D):
fig = plt.figure(figsize=plt.figaspect(0.5)*1.5)
ax = fig.gca(projection='3d')
line_styles =["b", "r", "c", "g"];
ss = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3];
ax.scatter(c0[0],c0[1],c0[2], c='k', marker='o');
for i in range(p.shape[0]):
ax.scatter(p[i,0],p[i,1],p[i,2], c=line_styles[i%len(line_styles)], marker='o');
for s in ss:
ax.scatter(p[i,0]+s*N[i,0],p[i,1]+s*N[i,1],p[i,2]+s*N[i,2], c=line_styles[i%len(line_styles)], marker='x');
for s in ss:
ax.scatter(c0[0]+s*dc0[0],c0[1]+s*dc0[1],c0[2]+s*dc0[2], c='k', marker='x');
ax.set_xlabel('x'); ax.set_ylabel('y'); ax.set_zlabel('z');
# return can_I_stop(c0, dc0, p, N, mu, mass, 1.0, 100, DO_PLOTS=DO_PLOTS);
(has_stopped, c_final, dc_final) = can_I_stop(c0, dc0, p, N, mu, mass, 1.0, 100, DO_PLOTS=DO_PLOTS);
print "Contact points\n", p;
print "Contact normals\n", N
print "Initial com position", c0
print "Initial com velocity", dc0, "norm %.3f"%norm(dc0)
print "Final com position", c_final
print "Final com velocity", dc_final, "norm %.3f"%norm(dc_final)
if(has_stopped):
print "The system is stable"
else:
print "The system is unstable"
return True;
if __name__=="__main__":
np.set_printoptions(precision=2, suppress=True, linewidth=100);
np.random.seed(0);
for i in range(1):
try:
test();
# ret = cProfile.run("test()");
except Exception as e:
print e;
continue;
| lgpl-3.0 |
mfjb/scikit-learn | examples/tree/plot_tree_regression.py | 206 | 1476 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
The :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly fine
details of the training data and learns from the noise, i.e. it overfits.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
boompieman/iim_project | project_python2/lib/python2.7/site-packages/nltk/classify/scikitlearn.py | 4 | 5920 | # Natural Language Toolkit: Interface to scikit-learn classifiers
#
# Author: Lars Buitinck <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
scikit-learn (http://scikit-learn.org) is a machine learning library for
Python. It supports many classification algorithms, including SVMs,
Naive Bayes, logistic regression (MaxEnt) and decision trees.
This package implements a wrapper around scikit-learn classifiers. To use this
wrapper, construct a scikit-learn estimator object, then use that to construct
a SklearnClassifier. E.g., to wrap a linear SVM with default settings:
>>> from sklearn.svm import LinearSVC
>>> from nltk.classify.scikitlearn import SklearnClassifier
>>> classif = SklearnClassifier(LinearSVC())
A scikit-learn classifier may include preprocessing steps when it's wrapped
in a Pipeline object. The following constructs and wraps a Naive Bayes text
classifier with tf-idf weighting and chi-square feature selection to get the
best 1000 features:
>>> from sklearn.feature_extraction.text import TfidfTransformer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> from sklearn.naive_bayes import MultinomialNB
>>> from sklearn.pipeline import Pipeline
>>> pipeline = Pipeline([('tfidf', TfidfTransformer()),
... ('chi2', SelectKBest(chi2, k=1000)),
... ('nb', MultinomialNB())])
>>> classif = SklearnClassifier(pipeline)
"""
from __future__ import print_function, unicode_literals
from nltk.classify.api import ClassifierI
from nltk.probability import DictionaryProbDist
from nltk import compat
try:
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import LabelEncoder
except ImportError:
pass
__all__ = ['SklearnClassifier']
@compat.python_2_unicode_compatible
class SklearnClassifier(ClassifierI):
"""Wrapper for scikit-learn classifiers."""
def __init__(self, estimator, dtype=float, sparse=True):
"""
:param estimator: scikit-learn classifier object.
:param dtype: data type used when building feature array.
scikit-learn estimators work exclusively on numeric data. The
default value should be fine for almost all situations.
:param sparse: Whether to use sparse matrices internally.
The estimator must support these; not all scikit-learn classifiers
do (see their respective documentation and look for "sparse
matrix"). The default value is True, since most NLP problems
involve sparse feature sets. Setting this to False may take a
great amount of memory.
:type sparse: boolean.
"""
self._clf = estimator
self._encoder = LabelEncoder()
self._vectorizer = DictVectorizer(dtype=dtype, sparse=sparse)
def __repr__(self):
return "<SklearnClassifier(%r)>" % self._clf
def classify_many(self, featuresets):
"""Classify a batch of samples.
:param featuresets: An iterable over featuresets, each a dict mapping
strings to either numbers, booleans or strings.
:return: The predicted class label for each input sample.
:rtype: list
"""
X = self._vectorizer.transform(featuresets)
classes = self._encoder.classes_
return [classes[i] for i in self._clf.predict(X)]
def prob_classify_many(self, featuresets):
"""Compute per-class probabilities for a batch of samples.
:param featuresets: An iterable over featuresets, each a dict mapping
strings to either numbers, booleans or strings.
:rtype: list of ``ProbDistI``
"""
X = self._vectorizer.transform(featuresets)
y_proba_list = self._clf.predict_proba(X)
return [self._make_probdist(y_proba) for y_proba in y_proba_list]
def labels(self):
"""The class labels used by this classifier.
:rtype: list
"""
return list(self._encoder.classes_)
def train(self, labeled_featuresets):
"""
Train (fit) the scikit-learn estimator.
:param labeled_featuresets: A list of ``(featureset, label)``
where each ``featureset`` is a dict mapping strings to either
numbers, booleans or strings.
"""
X, y = list(compat.izip(*labeled_featuresets))
X = self._vectorizer.fit_transform(X)
y = self._encoder.fit_transform(y)
self._clf.fit(X, y)
return self
def _make_probdist(self, y_proba):
classes = self._encoder.classes_
return DictionaryProbDist(dict((classes[i], p)
for i, p in enumerate(y_proba)))
# skip doctests if scikit-learn is not installed
def setup_module(module):
from nose import SkipTest
try:
import sklearn
except ImportError:
raise SkipTest("scikit-learn is not installed")
if __name__ == "__main__":
from nltk.classify.util import names_demo, names_demo_features
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import BernoulliNB
# Bernoulli Naive Bayes is designed for binary classification. We set the
# binarize option to False since we know we're passing boolean features.
print("scikit-learn Naive Bayes:")
names_demo(SklearnClassifier(BernoulliNB(binarize=False)).train,
features=names_demo_features)
# The C parameter on logistic regression (MaxEnt) controls regularization.
# The higher it's set, the less regularized the classifier is.
print("\n\nscikit-learn logistic regression:")
names_demo(SklearnClassifier(LogisticRegression(C=1000)).train,
features=names_demo_features)
| gpl-3.0 |
Garrett-R/scikit-learn | benchmarks/bench_multilabel_metrics.py | 11 | 7258 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': f1_score,
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
density : array-like of ints (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
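# Example (illustrative): time every metric on all formats for a single setting
# res = benchmark(samples=500, classes=8, density=0.2, n_times=3)
# res.shape == (len(METRICS), len(FORMATS), 1, 1, 1)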
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
cmtm/networkx | networkx/convert.py | 2 | 13254 | """Functions to convert NetworkX graphs to and from other formats.
The preferred way of converting data to a NetworkX graph is through the
graph constuctor. The constructor calls the to_networkx_graph() function
which attempts to guess the input type and convert it automatically.
Examples
--------
Create a graph with a single edge from a dictionary of dictionaries
>>> d={0: {1: 1}} # dict-of-dicts single edge (0,1)
>>> G=nx.Graph(d)
See Also
--------
nx_agraph, nx_pydot
"""
# Copyright (C) 2006-2013 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import warnings
import networkx as nx
__author__ = """\n""".join(['Aric Hagberg <[email protected]>',
'Pieter Swart ([email protected])',
'Dan Schult([email protected])'])
__all__ = ['to_networkx_graph',
'from_dict_of_dicts', 'to_dict_of_dicts',
'from_dict_of_lists', 'to_dict_of_lists',
'from_edgelist', 'to_edgelist']
def _prep_create_using(create_using):
"""Return a graph object ready to be populated.
If create_using is None return the default (just networkx.Graph())
If create_using.clear() works, assume it is a usable graph object.
Otherwise raise an exception because create_using is not a networkx graph.
"""
if create_using is None:
return nx.Graph()
try:
create_using.clear()
except:
raise TypeError("Input graph is not a networkx graph type")
return create_using
def to_networkx_graph(data,create_using=None,multigraph_input=False):
"""Make a NetworkX graph from a known data structure.
The preferred way to call this is automatically
from the class constructor
>>> d={0: {1: {'weight':1}}} # dict-of-dicts single edge (0,1)
>>> G=nx.Graph(d)
instead of the equivalent
>>> G=nx.from_dict_of_dicts(d)
Parameters
----------
data : object to be converted
Current known types are:
any NetworkX graph
dict-of-dicts
dict-of-lists
list of edges
numpy matrix
numpy ndarray
scipy sparse matrix
pygraphviz agraph
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
multigraph_input : bool (default False)
If True and data is a dict_of_dicts,
try to create a multigraph assuming dict_of_dict_of_lists.
If data and create_using are both multigraphs then create
a multigraph from a multigraph.
"""
# NX graph
if hasattr(data,"adj"):
try:
result= from_dict_of_dicts(data.adj,\
create_using=create_using,\
multigraph_input=data.is_multigraph())
if hasattr(data,'graph'): # data.graph should be dict-like
result.graph.update(data.graph)
if hasattr(data,'node'): # data.node should be dict-like
result.node.update( (n,dd.copy()) for n,dd in data.node.items() )
return result
except:
raise nx.NetworkXError("Input is not a correct NetworkX graph.")
# pygraphviz agraph
if hasattr(data,"is_strict"):
try:
return nx.nx_agraph.from_agraph(data,create_using=create_using)
except:
raise nx.NetworkXError("Input is not a correct pygraphviz graph.")
# dict of dicts/lists
if isinstance(data,dict):
try:
return from_dict_of_dicts(data,create_using=create_using,\
multigraph_input=multigraph_input)
except:
try:
return from_dict_of_lists(data,create_using=create_using)
except:
raise TypeError("Input is not known type.")
# list or generator of edges
if (isinstance(data,list)
or isinstance(data,tuple)
or hasattr(data,'next')
or hasattr(data, '__next__')):
try:
return from_edgelist(data,create_using=create_using)
except:
raise nx.NetworkXError("Input is not a valid edge list")
# Pandas DataFrame
try:
import pandas as pd
if isinstance(data, pd.DataFrame):
try:
return nx.from_pandas_dataframe(data, create_using=create_using)
except:
msg = "Input is not a correct Pandas DataFrame."
raise nx.NetworkXError(msg)
except ImportError:
msg = 'pandas not found, skipping conversion test.'
warnings.warn(msg, ImportWarning)
# numpy matrix or ndarray
try:
import numpy
if isinstance(data,numpy.matrix) or \
isinstance(data,numpy.ndarray):
try:
return nx.from_numpy_matrix(data,create_using=create_using)
except:
raise nx.NetworkXError(\
"Input is not a correct numpy matrix or array.")
except ImportError:
warnings.warn('numpy not found, skipping conversion test.',
ImportWarning)
# scipy sparse matrix - any format
try:
import scipy
if hasattr(data,"format"):
try:
return nx.from_scipy_sparse_matrix(data,create_using=create_using)
except:
raise nx.NetworkXError(\
"Input is not a correct scipy sparse matrix type.")
except ImportError:
warnings.warn('scipy not found, skipping conversion test.',
ImportWarning)
raise nx.NetworkXError(\
"Input is not a known data type for conversion.")
return
def convert_to_undirected(G):
"""Return a new undirected representation of the graph G."""
return G.to_undirected()
def convert_to_directed(G):
"""Return a new directed representation of the graph G."""
return G.to_directed()
def to_dict_of_lists(G,nodelist=None):
"""Return adjacency representation of graph as a dictionary of lists.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
Notes
-----
Completely ignores edge data for MultiGraph and MultiDiGraph.
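Examples
--------
>>> G = nx.path_graph(3)
>>> nx.to_dict_of_lists(G)
{0: [1], 1: [0, 2], 2: [1]}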
"""
if nodelist is None:
nodelist=G
d = {}
for n in nodelist:
d[n]=[nbr for nbr in G.neighbors(n) if nbr in nodelist]
return d
def from_dict_of_lists(d,create_using=None):
"""Return a graph from a dictionary of lists.
Parameters
----------
d : dictionary of lists
A dictionary of lists adjacency representation.
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
Examples
--------
>>> dol= {0:[1]} # single edge (0,1)
>>> G=nx.from_dict_of_lists(dol)
or
>>> G=nx.Graph(dol) # use Graph constructor
"""
G=_prep_create_using(create_using)
G.add_nodes_from(d)
if G.is_multigraph() and not G.is_directed():
# a dict_of_lists can't show multiedges. BUT for undirected graphs,
# each edge shows up twice in the dict_of_lists.
# So we need to treat this case separately.
seen={}
for node,nbrlist in d.items():
for nbr in nbrlist:
if nbr not in seen:
G.add_edge(node,nbr)
seen[node]=1 # don't allow reverse edge to show up
else:
G.add_edges_from( ((node,nbr) for node,nbrlist in d.items()
for nbr in nbrlist) )
return G
def to_dict_of_dicts(G,nodelist=None,edge_data=None):
"""Return adjacency representation of graph as a dictionary of dictionaries.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
edge_data : list, optional
If provided, the value of the dictionary will be
set to edge_data for all edges. This is useful to make
an adjacency matrix type representation with 1 as the edge data.
If edgedata is None, the edgedata in G is used to fill the values.
If G is a multigraph, the edgedata is a dict for each pair (u,v).
"""
dod={}
if nodelist is None:
if edge_data is None:
for u,nbrdict in G.adjacency():
dod[u]=nbrdict.copy()
else: # edge_data is not None
for u,nbrdict in G.adjacency():
dod[u]=dod.fromkeys(nbrdict, edge_data)
else: # nodelist is not None
if edge_data is None:
for u in nodelist:
dod[u]={}
for v,data in ((v,data) for v,data in G[u].items() if v in nodelist):
dod[u][v]=data
else: # nodelist and edge_data are not None
for u in nodelist:
dod[u]={}
for v in ( v for v in G[u] if v in nodelist):
dod[u][v]=edge_data
return dod
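# A short illustration of the edge_data switch above (again assuming the
# module-level networkx import as nx): passing edge_data=1 produces an
# adjacency-matrix style mapping, while the default keeps the stored
# attribute dictionaries.
def _demo_to_dict_of_dicts():
    G = nx.Graph()
    G.add_edge('a', 'b', weight=7)
    with_attrs = to_dict_of_dicts(G)                 # {'a': {'b': {'weight': 7}}, 'b': {'a': {'weight': 7}}}
    as_indicator = to_dict_of_dicts(G, edge_data=1)  # {'a': {'b': 1}, 'b': {'a': 1}}
    return with_attrs, as_indicator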
def from_dict_of_dicts(d,create_using=None,multigraph_input=False):
"""Return a graph from a dictionary of dictionaries.
Parameters
----------
d : dictionary of dictionaries
A dictionary of dictionaries adjacency representation.
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
multigraph_input : bool (default False)
When True, the values of the inner dict are assumed
to be containers of edge data for multiple edges.
Otherwise this routine assumes the edge data are singletons.
Examples
--------
>>> dod= {0: {1:{'weight':1}}} # single edge (0,1)
>>> G=nx.from_dict_of_dicts(dod)
or
>>> G=nx.Graph(dod) # use Graph constructor
"""
G=_prep_create_using(create_using)
G.add_nodes_from(d)
# is dict a MultiGraph or MultiDiGraph?
if multigraph_input:
# make a copy of the list of edge data (but not the edge data)
if G.is_directed():
if G.is_multigraph():
G.add_edges_from( (u,v,key,data)
for u,nbrs in d.items()
for v,datadict in nbrs.items()
for key,data in datadict.items()
)
else:
G.add_edges_from( (u,v,data)
for u,nbrs in d.items()
for v,datadict in nbrs.items()
for key,data in datadict.items()
)
else: # Undirected
if G.is_multigraph():
seen=set() # don't add both directions of undirected graph
for u,nbrs in d.items():
for v,datadict in nbrs.items():
if (u,v) not in seen:
G.add_edges_from( (u,v,key,data)
for key,data in datadict.items()
)
seen.add((v,u))
else:
seen=set() # don't add both directions of undirected graph
for u,nbrs in d.items():
for v,datadict in nbrs.items():
if (u,v) not in seen:
G.add_edges_from( (u,v,data)
for key,data in datadict.items() )
seen.add((v,u))
else: # not a multigraph to multigraph transfer
if G.is_multigraph() and not G.is_directed():
# d can have both representations u-v, v-u in dict. Only add one.
# We don't need this check for digraphs since we add both directions,
# or for Graph() since it is done implicitly (parallel edges not allowed)
seen=set()
for u,nbrs in d.items():
for v,data in nbrs.items():
if (u,v) not in seen:
G.add_edge(u,v,key=0)
G[u][v][0].update(data)
seen.add((v,u))
else:
G.add_edges_from( ( (u,v,data)
for u,nbrs in d.items()
for v,data in nbrs.items()) )
return G
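# A hedged sketch of the multigraph_input branch above: with
# multigraph_input=True the inner dictionaries are read as {key: data}
# containers of parallel edges (assumes nx.MultiGraph via the module-level
# networkx import).
def _demo_from_dict_of_dicts_multigraph():
    dod = {0: {1: {0: {'weight': 1}, 1: {'weight': 5}}},
           1: {0: {0: {'weight': 1}, 1: {'weight': 5}}}}
    G = from_dict_of_dicts(dod, create_using=nx.MultiGraph(),
                           multigraph_input=True)
    assert G.number_of_edges(0, 1) == 2   # two parallel edges, each with its own data
    return G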
def to_edgelist(G,nodelist=None):
"""Return a list of edges in the graph.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
"""
if nodelist is None:
return G.edges(data=True)
else:
return G.edges(nodelist,data=True)
def from_edgelist(edgelist,create_using=None):
"""Return a graph from a list of edges.
Parameters
----------
edgelist : list or iterator
Edge tuples
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
Examples
--------
>>> edgelist= [(0,1)] # single edge (0,1)
>>> G=nx.from_edgelist(edgelist)
or
>>> G=nx.Graph(edgelist) # use Graph constructor
"""
G=_prep_create_using(create_using)
G.add_edges_from(edgelist)
return G

| bsd-3-clause |
loretoparisi/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/delaunay/interpolate.py | 73 | 7068 | import numpy as np
from matplotlib._delaunay import compute_planes, linear_interpolate_grid, nn_interpolate_grid
from matplotlib._delaunay import nn_interpolate_unstructured
__all__ = ['LinearInterpolator', 'NNInterpolator']
def slice2gridspec(key):
"""Convert a 2-tuple of slices to start,stop,steps for x and y.
    key -- (slice(ystart,ystop,ystep), slice(xstart, xstop, xstep))
For now, the only accepted step values are imaginary integers (interpreted
in the same way numpy.mgrid, etc. do).
"""
if ((len(key) != 2) or
(not isinstance(key[0], slice)) or
(not isinstance(key[1], slice))):
raise ValueError("only 2-D slices, please")
x0 = key[1].start
x1 = key[1].stop
xstep = key[1].step
if not isinstance(xstep, complex) or int(xstep.real) != xstep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
xstep = int(xstep.imag)
y0 = key[0].start
y1 = key[0].stop
ystep = key[0].step
if not isinstance(ystep, complex) or int(ystep.real) != ystep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
ystep = int(ystep.imag)
return x0, x1, xstep, y0, y1, ystep
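# A tiny example of the slice convention above: the imaginary step follows
# numpy.mgrid semantics, so 5j means "5 samples" rather than a step size.
def _demo_slice2gridspec():
    key = (slice(0.0, 1.0, 5j), slice(-2.0, 2.0, 9j))   # (y-slice, x-slice)
    # returns (-2.0, 2.0, 9, 0.0, 1.0, 5): 9 samples in x, 5 samples in y
    return slice2gridspec(key)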
class LinearInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
using the planes defined by the three function values at each corner of
the triangles.
LinearInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
    At the moment, only regular rectangular grids are supported for
    interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Attributes:
planes -- (ntriangles, 3) array of floats specifying the plane for each
triangle.
Linear Interpolation
--------------------
    Given the Delaunay triangulation (or indeed *any* complete triangulation) we
can interpolate values inside the convex hull by locating the enclosing
triangle of the interpolation point and returning the value at that point of
the plane defined by the three node values.
f = planes[tri,0]*x + planes[tri,1]*y + planes[tri,2]
The interpolated function is C0 continuous across the convex hull of the
input points. It is C1 continuous across the convex hull except for the
nodes and the edges of the triangulation.
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
self.planes = compute_planes(triangulation.x, triangulation.y, self.z,
triangulation.triangle_nodes)
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = linear_interpolate_grid(x0, x1, xstep, y0, y1, ystep, self.default_value,
self.planes, self.triangulation.x, self.triangulation.y,
self.triangulation.triangle_nodes, self.triangulation.triangle_neighbors)
return grid
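# A minimal usage sketch for LinearInterpolator, assuming that
# matplotlib.delaunay.Triangulation is importable in this matplotlib build
# (the same package that provides the compiled _delaunay helpers used above).
def _demo_linear_interpolator(n_nodes=100):
    from matplotlib.delaunay import Triangulation
    x = np.random.uniform(-1, 1, n_nodes)
    y = np.random.uniform(-1, 1, n_nodes)
    z = x * np.exp(-x ** 2 - y ** 2)
    interp = LinearInterpolator(Triangulation(x, y), z, default_value=0.0)
    # slices follow numpy.mgrid semantics: a 51x51 grid over [-1, 1] x [-1, 1]
    return interp[-1:1:51j, -1:1:51j]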
class NNInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
the natural neighbors method.
NNInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
    At the moment, only regular rectangular grids are supported for
    interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Natural Neighbors Interpolation
-------------------------------
One feature of the Delaunay triangulation is that for each triangle, its
circumcircle contains no other point (although in degenerate cases, like
squares, other points may be *on* the circumcircle). One can also construct
what is called the Voronoi diagram from a Delaunay triangulation by
connecting the circumcenters of the triangles to those of their neighbors to
    form a tessellation of irregular polygons covering the plane and containing
only one node from the triangulation. Each point in one node's Voronoi
polygon is closer to that node than any other node.
To compute the Natural Neighbors interpolant, we consider adding the
interpolation point to the triangulation. We define the natural neighbors of
this point as the set of nodes participating in Delaunay triangles whose
circumcircles contain the point. To restore the Delaunay-ness of the
triangulation, one would only have to alter those triangles and Voronoi
    polygons. The new Voronoi diagram would have a polygon around the inserted
point. This polygon would "steal" area from the original Voronoi polygons.
For each node i in the natural neighbors set, we compute the area stolen
from its original Voronoi polygon, stolen[i]. We define the natural
neighbors coordinates
phi[i] = stolen[i] / sum(stolen,axis=0)
We then use these phi[i] to weight the corresponding function values from
the input data z to compute the interpolated value.
The interpolated surface is C1-continuous except at the nodes themselves
across the convex hull of the input points. One can find the set of points
that a given node will affect by computing the union of the areas covered by
the circumcircles of each Delaunay triangle that node participates in.
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = nn_interpolate_grid(x0, x1, xstep, y0, y1, ystep, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return grid
def __call__(self, intx, inty):
intz = nn_interpolate_unstructured(intx, inty, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return intz
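# A companion sketch for NNInterpolator: unlike the grid-based __getitem__
# path, __call__ interpolates at arbitrary scattered points (same
# Triangulation assumption as the LinearInterpolator sketch above).
def _demo_nn_interpolator_unstructured(n_nodes=100):
    from matplotlib.delaunay import Triangulation
    x = np.random.uniform(-1, 1, n_nodes)
    y = np.random.uniform(-1, 1, n_nodes)
    z = x * np.exp(-x ** 2 - y ** 2)
    interp = NNInterpolator(Triangulation(x, y), z, default_value=0.0)
    xi = np.array([0.0, 0.25, -0.5])
    yi = np.array([0.0, -0.25, 0.5])
    return interp(xi, yi)   # natural-neighbour estimates at the three points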
| agpl-3.0 |
Kleptobismol/scikit-bio | skbio/stats/distance/tests/test_base.py | 1 | 28001 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import zip
from six import StringIO, binary_type, text_type
from unittest import TestCase, main
import matplotlib as mpl
mpl.use('Agg')
import numpy as np
import numpy.testing as npt
import pandas as pd
from IPython.core.display import Image, SVG
from skbio import DistanceMatrix
from skbio.stats.distance import (
DissimilarityMatrixError, DistanceMatrixError, MissingIDError,
DissimilarityMatrix, randdm, CategoricalStatsResults)
from skbio.stats.distance._base import (
_preprocess_input, _run_monte_carlo_stats, CategoricalStats)
class DissimilarityMatrixTestData(TestCase):
def setUp(self):
self.dm_1x1_data = [[0.0]]
self.dm_2x2_data = [[0.0, 0.123], [0.123, 0.0]]
self.dm_2x2_asym_data = [[0.0, 1.0], [-2.0, 0.0]]
self.dm_3x3_data = [[0.0, 0.01, 4.2], [0.01, 0.0, 12.0],
[4.2, 12.0, 0.0]]
class DissimilarityMatrixTests(DissimilarityMatrixTestData):
def setUp(self):
super(DissimilarityMatrixTests, self).setUp()
self.dm_1x1 = DissimilarityMatrix(self.dm_1x1_data, ['a'])
self.dm_2x2 = DissimilarityMatrix(self.dm_2x2_data, ['a', 'b'])
self.dm_2x2_asym = DissimilarityMatrix(self.dm_2x2_asym_data,
['a', 'b'])
self.dm_3x3 = DissimilarityMatrix(self.dm_3x3_data, ['a', 'b', 'c'])
self.dms = [self.dm_1x1, self.dm_2x2, self.dm_2x2_asym, self.dm_3x3]
self.dm_shapes = [(1, 1), (2, 2), (2, 2), (3, 3)]
self.dm_sizes = [1, 4, 4, 9]
self.dm_transposes = [
self.dm_1x1, self.dm_2x2,
DissimilarityMatrix([[0, -2], [1, 0]], ['a', 'b']), self.dm_3x3]
self.dm_redundant_forms = [np.array(self.dm_1x1_data),
np.array(self.dm_2x2_data),
np.array(self.dm_2x2_asym_data),
np.array(self.dm_3x3_data)]
def test_deprecated_io(self):
fh = StringIO()
npt.assert_warns(DeprecationWarning, self.dm_3x3.to_file, fh)
fh.seek(0)
deserialized = npt.assert_warns(DeprecationWarning,
DissimilarityMatrix.from_file, fh)
self.assertEqual(deserialized, self.dm_3x3)
self.assertTrue(type(deserialized) == DissimilarityMatrix)
def test_init_from_dm(self):
ids = ['foo', 'bar', 'baz']
# DissimilarityMatrix -> DissimilarityMatrix
exp = DissimilarityMatrix(self.dm_3x3_data, ids)
obs = DissimilarityMatrix(self.dm_3x3, ids)
self.assertEqual(obs, exp)
# Test that copy of data is not made.
self.assertTrue(obs.data is self.dm_3x3.data)
obs.data[0, 1] = 424242
self.assertTrue(np.array_equal(obs.data, self.dm_3x3.data))
# DistanceMatrix -> DissimilarityMatrix
exp = DissimilarityMatrix(self.dm_3x3_data, ids)
obs = DissimilarityMatrix(
DistanceMatrix(self.dm_3x3_data, ('a', 'b', 'c')), ids)
self.assertEqual(obs, exp)
# DissimilarityMatrix -> DistanceMatrix
with self.assertRaises(DistanceMatrixError):
DistanceMatrix(self.dm_2x2_asym, ['foo', 'bar'])
def test_init_no_ids(self):
exp = DissimilarityMatrix(self.dm_3x3_data, ('0', '1', '2'))
obs = DissimilarityMatrix(self.dm_3x3_data)
self.assertEqual(obs, exp)
self.assertEqual(obs['1', '2'], 12.0)
def test_init_invalid_input(self):
# Empty data.
with self.assertRaises(DissimilarityMatrixError):
DissimilarityMatrix([], [])
# Another type of empty data.
with self.assertRaises(DissimilarityMatrixError):
DissimilarityMatrix(np.empty((0, 0)), [])
# Invalid number of dimensions.
with self.assertRaises(DissimilarityMatrixError):
DissimilarityMatrix([1, 2, 3], ['a'])
# Dimensions don't match.
with self.assertRaises(DissimilarityMatrixError):
DissimilarityMatrix([[1, 2, 3]], ['a'])
data = [[0, 1], [1, 0]]
# Duplicate IDs.
with self.assertRaises(DissimilarityMatrixError):
DissimilarityMatrix(data, ['a', 'a'])
# Number of IDs don't match dimensions.
with self.assertRaises(DissimilarityMatrixError):
DissimilarityMatrix(data, ['a', 'b', 'c'])
# Non-hollow.
data = [[0.0, 1.0], [1.0, 0.01]]
with self.assertRaises(DissimilarityMatrixError):
DissimilarityMatrix(data, ['a', 'b'])
def test_data(self):
for dm, exp in zip(self.dms, self.dm_redundant_forms):
obs = dm.data
self.assertTrue(np.array_equal(obs, exp))
with self.assertRaises(AttributeError):
self.dm_3x3.data = 'foo'
def test_ids(self):
obs = self.dm_3x3.ids
self.assertEqual(obs, ('a', 'b', 'c'))
# Test that we overwrite the existing IDs and that the ID index is
# correctly rebuilt.
new_ids = ['foo', 'bar', 'baz']
self.dm_3x3.ids = new_ids
obs = self.dm_3x3.ids
self.assertEqual(obs, tuple(new_ids))
self.assertTrue(np.array_equal(self.dm_3x3['bar'],
np.array([0.01, 0.0, 12.0])))
with self.assertRaises(MissingIDError):
self.dm_3x3['b']
def test_ids_invalid_input(self):
with self.assertRaises(DissimilarityMatrixError):
self.dm_3x3.ids = ['foo', 'bar']
# Make sure that we can still use the dissimilarity matrix after trying
# to be evil.
obs = self.dm_3x3.ids
self.assertEqual(obs, ('a', 'b', 'c'))
def test_dtype(self):
for dm in self.dms:
self.assertEqual(dm.dtype, np.float64)
def test_shape(self):
for dm, shape in zip(self.dms, self.dm_shapes):
self.assertEqual(dm.shape, shape)
def test_size(self):
for dm, size in zip(self.dms, self.dm_sizes):
self.assertEqual(dm.size, size)
def test_transpose(self):
for dm, transpose in zip(self.dms, self.dm_transposes):
self.assertEqual(dm.T, transpose)
self.assertEqual(dm.transpose(), transpose)
# We should get a reference to a different object back, even if the
# transpose is the same as the original.
self.assertTrue(dm.transpose() is not dm)
def test_index(self):
self.assertEqual(self.dm_3x3.index('a'), 0)
self.assertEqual(self.dm_3x3.index('b'), 1)
self.assertEqual(self.dm_3x3.index('c'), 2)
with self.assertRaises(MissingIDError):
self.dm_3x3.index('d')
with self.assertRaises(MissingIDError):
self.dm_3x3.index(1)
def test_redundant_form(self):
for dm, redundant in zip(self.dms, self.dm_redundant_forms):
obs = dm.redundant_form()
self.assertTrue(np.array_equal(obs, redundant))
def test_copy(self):
copy = self.dm_2x2.copy()
self.assertEqual(copy, self.dm_2x2)
self.assertFalse(copy.data is self.dm_2x2.data)
# deepcopy doesn't actually create a copy of the IDs because it is a
# tuple of strings, which is fully immutable.
self.assertTrue(copy.ids is self.dm_2x2.ids)
new_ids = ['hello', 'world']
copy.ids = new_ids
self.assertNotEqual(copy.ids, self.dm_2x2.ids)
copy = self.dm_2x2.copy()
copy.data[0, 1] = 0.0001
self.assertFalse(np.array_equal(copy.data, self.dm_2x2.data))
def test_filter_no_filtering(self):
# Don't actually filter anything -- ensure we get back a different
# object.
obs = self.dm_3x3.filter(['a', 'b', 'c'])
self.assertEqual(obs, self.dm_3x3)
self.assertFalse(obs is self.dm_3x3)
def test_filter_reorder(self):
# Don't filter anything, but reorder the distance matrix.
order = ['c', 'a', 'b']
exp = DissimilarityMatrix(
[[0, 4.2, 12], [4.2, 0, 0.01], [12, 0.01, 0]], order)
obs = self.dm_3x3.filter(order)
self.assertEqual(obs, exp)
def test_filter_single_id(self):
ids = ['b']
exp = DissimilarityMatrix([[0]], ids)
obs = self.dm_2x2_asym.filter(ids)
self.assertEqual(obs, exp)
def test_filter_asymmetric(self):
# 2x2
ids = ['b', 'a']
exp = DissimilarityMatrix([[0, -2], [1, 0]], ids)
obs = self.dm_2x2_asym.filter(ids)
self.assertEqual(obs, exp)
# 3x3
dm = DissimilarityMatrix([[0, 10, 53], [42, 0, 22.5], [53, 1, 0]],
('bro', 'brah', 'breh'))
ids = ['breh', 'brah']
exp = DissimilarityMatrix([[0, 1], [22.5, 0]], ids)
obs = dm.filter(ids)
self.assertEqual(obs, exp)
def test_filter_subset(self):
ids = ('c', 'a')
exp = DissimilarityMatrix([[0, 4.2], [4.2, 0]], ids)
obs = self.dm_3x3.filter(ids)
self.assertEqual(obs, exp)
ids = ('b', 'a')
exp = DissimilarityMatrix([[0, 0.01], [0.01, 0]], ids)
obs = self.dm_3x3.filter(ids)
self.assertEqual(obs, exp)
# 4x4
dm = DissimilarityMatrix([[0, 1, 55, 7], [1, 0, 16, 1],
[55, 16, 0, 23], [7, 1, 23, 0]])
ids = np.asarray(['3', '0', '1'])
exp = DissimilarityMatrix([[0, 7, 1], [7, 0, 1], [1, 1, 0]], ids)
obs = dm.filter(ids)
self.assertEqual(obs, exp)
def test_filter_duplicate_ids(self):
with self.assertRaises(DissimilarityMatrixError):
self.dm_3x3.filter(['c', 'a', 'c'])
def test_filter_missing_ids(self):
with self.assertRaises(MissingIDError):
self.dm_3x3.filter(['c', 'bro'])
def test_filter_missing_ids_strict_false(self):
# no exception should be raised
ids = ('c', 'a')
exp = DissimilarityMatrix([[0, 4.2], [4.2, 0]], ids)
obs = self.dm_3x3.filter(['c', 'a', 'not found'], strict=False)
self.assertEqual(obs, exp)
def test_filter_empty_ids(self):
with self.assertRaises(DissimilarityMatrixError):
self.dm_3x3.filter([])
def test_plot_default(self):
fig = self.dm_1x1.plot()
self.assertIsInstance(fig, mpl.figure.Figure)
axes = fig.get_axes()
self.assertEqual(len(axes), 2)
ax = axes[0]
self.assertEqual(ax.get_title(), '')
xticks = []
for tick in ax.get_xticklabels():
xticks.append(tick.get_text())
self.assertEqual(xticks, ['a'])
yticks = []
for tick in ax.get_yticklabels():
yticks.append(tick.get_text())
self.assertEqual(yticks, ['a'])
def test_plot_no_default(self):
ids = ['0', 'one', '2', 'three', '4.000']
data = ([0, 1, 2, 3, 4], [1, 0, 1, 2, 3], [2, 1, 0, 1, 2],
[3, 2, 1, 0, 1], [4, 3, 2, 1, 0])
dm = DissimilarityMatrix(data, ids)
fig = dm.plot(cmap='Reds', title='Testplot')
self.assertIsInstance(fig, mpl.figure.Figure)
axes = fig.get_axes()
self.assertEqual(len(axes), 2)
ax = axes[0]
self.assertEqual(ax.get_title(), 'Testplot')
xticks = []
for tick in ax.get_xticklabels():
xticks.append(tick.get_text())
self.assertEqual(xticks, ['0', 'one', '2', 'three', '4.000'])
yticks = []
for tick in ax.get_yticklabels():
yticks.append(tick.get_text())
self.assertEqual(yticks, ['0', 'one', '2', 'three', '4.000'])
def test_repr_png(self):
dm = self.dm_1x1
obs = dm._repr_png_()
self.assertIsInstance(obs, binary_type)
self.assertTrue(len(obs) > 0)
def test_repr_svg(self):
obs = self.dm_1x1._repr_svg_()
# print_figure(format='svg') can return text or bytes depending on the
# version of IPython
self.assertTrue(isinstance(obs, text_type) or
isinstance(obs, binary_type))
self.assertTrue(len(obs) > 0)
def test_png(self):
dm = self.dm_1x1
self.assertIsInstance(dm.png, Image)
def test_svg(self):
dm = self.dm_1x1
self.assertIsInstance(dm.svg, SVG)
def test_str(self):
for dm in self.dms:
obs = str(dm)
# Do some very light testing here to make sure we're getting a
# non-empty string back. We don't want to test the exact
# formatting.
self.assertTrue(obs)
def test_eq(self):
for dm in self.dms:
copy = dm.copy()
self.assertTrue(dm == dm)
self.assertTrue(copy == copy)
self.assertTrue(dm == copy)
self.assertTrue(copy == dm)
self.assertFalse(self.dm_1x1 == self.dm_3x3)
def test_ne(self):
# Wrong class.
self.assertTrue(self.dm_3x3 != 'foo')
# Wrong shape.
self.assertTrue(self.dm_3x3 != self.dm_1x1)
# Wrong IDs.
other = self.dm_3x3.copy()
other.ids = ['foo', 'bar', 'baz']
self.assertTrue(self.dm_3x3 != other)
# Wrong data.
other = self.dm_3x3.copy()
other.data[1, 0] = 42.42
self.assertTrue(self.dm_3x3 != other)
self.assertFalse(self.dm_2x2 != self.dm_2x2)
def test_contains(self):
self.assertTrue('a' in self.dm_3x3)
self.assertTrue('b' in self.dm_3x3)
self.assertTrue('c' in self.dm_3x3)
self.assertFalse('d' in self.dm_3x3)
def test_getslice(self):
# Slice of first dimension only. Test that __getslice__ defers to
# __getitem__.
obs = self.dm_2x2[1:]
self.assertTrue(np.array_equal(obs, np.array([[0.123, 0.0]])))
self.assertEqual(type(obs), np.ndarray)
def test_getitem_by_id(self):
obs = self.dm_1x1['a']
self.assertTrue(np.array_equal(obs, np.array([0.0])))
obs = self.dm_2x2_asym['b']
self.assertTrue(np.array_equal(obs, np.array([-2.0, 0.0])))
obs = self.dm_3x3['c']
self.assertTrue(np.array_equal(obs, np.array([4.2, 12.0, 0.0])))
with self.assertRaises(MissingIDError):
self.dm_2x2['c']
def test_getitem_by_id_pair(self):
# Same object.
self.assertEqual(self.dm_1x1['a', 'a'], 0.0)
# Different objects (symmetric).
self.assertEqual(self.dm_3x3['b', 'c'], 12.0)
self.assertEqual(self.dm_3x3['c', 'b'], 12.0)
# Different objects (asymmetric).
self.assertEqual(self.dm_2x2_asym['a', 'b'], 1.0)
self.assertEqual(self.dm_2x2_asym['b', 'a'], -2.0)
with self.assertRaises(MissingIDError):
self.dm_2x2['a', 'c']
def test_getitem_ndarray_indexing(self):
# Single element access.
obs = self.dm_3x3[0, 1]
self.assertEqual(obs, 0.01)
# Single element access (via two __getitem__ calls).
obs = self.dm_3x3[0][1]
self.assertEqual(obs, 0.01)
# Row access.
obs = self.dm_3x3[1]
self.assertTrue(np.array_equal(obs, np.array([0.01, 0.0, 12.0])))
self.assertEqual(type(obs), np.ndarray)
# Grab all data.
obs = self.dm_3x3[:, :]
self.assertTrue(np.array_equal(obs, self.dm_3x3.data))
self.assertEqual(type(obs), np.ndarray)
with self.assertRaises(IndexError):
self.dm_3x3[:, 3]
def test_validate_invalid_dtype(self):
with self.assertRaises(DissimilarityMatrixError):
self.dm_3x3._validate(np.array([[0, 42], [42, 0]]), ['a', 'b'])
class DistanceMatrixTests(DissimilarityMatrixTestData):
def setUp(self):
super(DistanceMatrixTests, self).setUp()
self.dm_1x1 = DistanceMatrix(self.dm_1x1_data, ['a'])
self.dm_2x2 = DistanceMatrix(self.dm_2x2_data, ['a', 'b'])
self.dm_3x3 = DistanceMatrix(self.dm_3x3_data, ['a', 'b', 'c'])
self.dms = [self.dm_1x1, self.dm_2x2, self.dm_3x3]
self.dm_condensed_forms = [np.array([]), np.array([0.123]),
np.array([0.01, 4.2, 12.0])]
def test_deprecated_io(self):
fh = StringIO()
npt.assert_warns(DeprecationWarning, self.dm_3x3.to_file, fh)
fh.seek(0)
deserialized = npt.assert_warns(DeprecationWarning,
DistanceMatrix.from_file, fh)
self.assertEqual(deserialized, self.dm_3x3)
self.assertTrue(type(deserialized) == DistanceMatrix)
def test_init_invalid_input(self):
# Asymmetric.
data = [[0.0, 2.0], [1.0, 0.0]]
with self.assertRaises(DistanceMatrixError):
DistanceMatrix(data, ['a', 'b'])
# Ensure that the superclass validation is still being performed.
with self.assertRaises(DissimilarityMatrixError):
DistanceMatrix([[1, 2, 3]], ['a'])
def test_condensed_form(self):
for dm, condensed in zip(self.dms, self.dm_condensed_forms):
obs = dm.condensed_form()
self.assertTrue(np.array_equal(obs, condensed))
def test_permute_condensed(self):
# Can't really permute a 1x1 or 2x2...
for _ in range(2):
obs = self.dm_1x1.permute(condensed=True)
npt.assert_equal(obs, np.array([]))
for _ in range(2):
obs = self.dm_2x2.permute(condensed=True)
npt.assert_equal(obs, np.array([0.123]))
dm_copy = self.dm_3x3.copy()
np.random.seed(0)
obs = self.dm_3x3.permute(condensed=True)
npt.assert_equal(obs, np.array([12.0, 4.2, 0.01]))
obs = self.dm_3x3.permute(condensed=True)
npt.assert_equal(obs, np.array([4.2, 12.0, 0.01]))
# Ensure dm hasn't changed after calling permute() on it a couple of
# times.
self.assertEqual(self.dm_3x3, dm_copy)
def test_permute_not_condensed(self):
obs = self.dm_1x1.permute()
self.assertEqual(obs, self.dm_1x1)
self.assertFalse(obs is self.dm_1x1)
obs = self.dm_2x2.permute()
self.assertEqual(obs, self.dm_2x2)
self.assertFalse(obs is self.dm_2x2)
np.random.seed(0)
exp = DistanceMatrix([[0, 12, 4.2],
[12, 0, 0.01],
[4.2, 0.01, 0]], self.dm_3x3.ids)
obs = self.dm_3x3.permute()
self.assertEqual(obs, exp)
exp = DistanceMatrix([[0, 4.2, 12],
[4.2, 0, 0.01],
[12, 0.01, 0]], self.dm_3x3.ids)
obs = self.dm_3x3.permute()
self.assertEqual(obs, exp)
def test_eq(self):
# Compare DistanceMatrix to DissimilarityMatrix, where both have the
# same data and IDs.
eq_dm = DissimilarityMatrix(self.dm_3x3_data, ['a', 'b', 'c'])
self.assertTrue(self.dm_3x3 == eq_dm)
self.assertTrue(eq_dm == self.dm_3x3)
class RandomDistanceMatrixTests(TestCase):
def test_default_usage(self):
exp = DistanceMatrix(np.asarray([[0.0]]), ['1'])
obs = randdm(1)
self.assertEqual(obs, exp)
obs = randdm(2)
self.assertEqual(obs.shape, (2, 2))
self.assertEqual(obs.ids, ('1', '2'))
obs1 = randdm(5)
num_trials = 10
found_diff = False
for _ in range(num_trials):
obs2 = randdm(5)
if obs1 != obs2:
found_diff = True
break
self.assertTrue(found_diff)
def test_ids(self):
ids = ['foo', 'bar', 'baz']
obs = randdm(3, ids=ids)
self.assertEqual(obs.shape, (3, 3))
self.assertEqual(obs.ids, tuple(ids))
def test_constructor(self):
exp = DissimilarityMatrix(np.asarray([[0.0]]), ['1'])
obs = randdm(1, constructor=DissimilarityMatrix)
self.assertEqual(obs, exp)
self.assertEqual(type(obs), DissimilarityMatrix)
def test_random_fn(self):
def myrand(num_rows, num_cols):
# One dm to rule them all...
data = np.empty((num_rows, num_cols))
data.fill(42)
return data
exp = DistanceMatrix(np.asarray([[0, 42, 42], [42, 0, 42],
[42, 42, 0]]), ['1', '2', '3'])
obs = randdm(3, random_fn=myrand)
self.assertEqual(obs, exp)
def test_invalid_input(self):
# Invalid dimensions.
with self.assertRaises(DissimilarityMatrixError):
randdm(0)
# Invalid dimensions.
with self.assertRaises(ValueError):
randdm(-1)
# Invalid number of IDs.
with self.assertRaises(DissimilarityMatrixError):
randdm(2, ids=['foo'])
class CategoricalStatsHelperFunctionTests(TestCase):
def setUp(self):
self.dm = DistanceMatrix([[0.0, 1.0, 2.0],
[1.0, 0.0, 3.0],
[2.0, 3.0, 0.0]], ['a', 'b', 'c'])
self.grouping = [1, 2, 1]
# Ordering of IDs shouldn't matter, nor should extra IDs.
self.df = pd.read_csv(
StringIO('ID,Group\nb,Group2\na,Group1\nc,Group1\nd,Group3'),
index_col=0)
self.df_missing_id = pd.read_csv(
StringIO('ID,Group\nb,Group2\nc,Group1'), index_col=0)
def test_preprocess_input_with_valid_input(self):
# Should obtain same result using grouping vector or data frame.
exp = (3, 2, np.array([0, 1, 0]),
(np.array([0, 0, 1]), np.array([1, 2, 2])),
np.array([1., 2., 3.]))
obs = _preprocess_input(self.dm, self.grouping, None)
npt.assert_equal(obs, exp)
obs = _preprocess_input(self.dm, self.df, 'Group')
npt.assert_equal(obs, exp)
def test_preprocess_input_raises_error(self):
# Requires a DistanceMatrix.
with self.assertRaises(TypeError):
_preprocess_input(
DissimilarityMatrix([[0, 2], [3, 0]], ['a', 'b']),
[1, 2], None)
# Requires column if DataFrame.
with self.assertRaises(ValueError):
_preprocess_input(self.dm, self.df, None)
# Cannot provide column if not data frame.
with self.assertRaises(ValueError):
_preprocess_input(self.dm, self.grouping, 'Group')
# Column must exist in data frame.
with self.assertRaises(ValueError):
_preprocess_input(self.dm, self.df, 'foo')
# All distance matrix IDs must be in data frame.
with self.assertRaises(ValueError):
_preprocess_input(self.dm, self.df_missing_id, 'Group')
# Grouping vector length must match number of objects in dm.
with self.assertRaises(ValueError):
_preprocess_input(self.dm, [1, 2], None)
# Grouping vector cannot have only unique values.
with self.assertRaises(ValueError):
_preprocess_input(self.dm, [1, 2, 3], None)
# Grouping vector cannot have only a single group.
with self.assertRaises(ValueError):
_preprocess_input(self.dm, [1, 1, 1], None)
def test_run_monte_carlo_stats_with_permutations(self):
obs = _run_monte_carlo_stats(lambda e: 42, self.grouping, 50)
npt.assert_equal(obs, (42, 1.0))
def test_run_monte_carlo_stats_no_permutations(self):
obs = _run_monte_carlo_stats(lambda e: 42, self.grouping, 0)
npt.assert_equal(obs, (42, np.nan))
def test_run_monte_carlo_stats_invalid_permutations(self):
with self.assertRaises(ValueError):
_run_monte_carlo_stats(lambda e: 42, self.grouping, -1)
class CategoricalStatsTests(TestCase):
def setUp(self):
self.dm = DistanceMatrix([[0.0, 1.0, 2.0], [1.0, 0.0, 3.0],
[2.0, 3.0, 0.0]], ['a', 'b', 'c'])
self.grouping = [1, 2, 1]
# Ordering of IDs shouldn't matter, nor should extra IDs.
self.df = pd.read_csv(
StringIO('ID,Group\nb,Group1\na,Group2\nc,Group1\nd,Group3'),
index_col=0)
self.df_missing_id = pd.read_csv(
StringIO('ID,Group\nb,Group1\nc,Group1'), index_col=0)
self.categorical_stats = CategoricalStats(self.dm, self.grouping)
self.categorical_stats_from_df = CategoricalStats(self.dm, self.df,
column='Group')
def test_init_invalid_input(self):
# Requires a DistanceMatrix.
with self.assertRaises(TypeError):
CategoricalStats(DissimilarityMatrix([[0, 2], [3, 0]], ['a', 'b']),
[1, 2])
# Requires column if DataFrame.
with self.assertRaises(ValueError):
CategoricalStats(self.dm, self.df)
# Cannot provide column if not data frame.
with self.assertRaises(ValueError):
CategoricalStats(self.dm, self.grouping, column='Group')
# Column must exist in data frame.
with self.assertRaises(ValueError):
CategoricalStats(self.dm, self.df, column='foo')
# All distance matrix IDs must be in data frame.
with self.assertRaises(ValueError):
CategoricalStats(self.dm, self.df_missing_id, column='Group')
# Grouping vector length must match number of objects in dm.
with self.assertRaises(ValueError):
CategoricalStats(self.dm, [1, 2])
# Grouping vector cannot have only unique values.
with self.assertRaises(ValueError):
CategoricalStats(self.dm, [1, 2, 3])
# Grouping vector cannot have only a single group.
with self.assertRaises(ValueError):
CategoricalStats(self.dm, [1, 1, 1])
def test_call(self):
with self.assertRaises(NotImplementedError):
self.categorical_stats()
def test_call_invalid_permutations(self):
with self.assertRaises(ValueError):
self.categorical_stats(-1)
class CategoricalStatsResultsTests(TestCase):
def setUp(self):
self.results = CategoricalStatsResults('foo', 'Foo', 'my stat', 42,
['a', 'b', 'c', 'd'],
0.01234567890, 0.1151111, 99)
def test_str(self):
exp = ('Method name Sample size Number of groups my stat '
'p-value Number of permutations\n foo 42'
' 4 0.0123456789 0.12'
' 99\n')
obs = str(self.results)
self.assertEqual(obs, exp)
def test_repr_html(self):
# Not going to test against exact HTML that we expect, as this could
# easily break and be annoying to constantly update. Do some light
# sanity-checking to ensure there are some of the expected HTML tags.
obs = self.results._repr_html_()
self.assertTrue('<table' in obs)
self.assertTrue('<thead' in obs)
self.assertTrue('<tr' in obs)
self.assertTrue('<th' in obs)
self.assertTrue('<tbody' in obs)
self.assertTrue('<td' in obs)
def test_summary(self):
exp = ('Method name\tSample size\tNumber of groups\tmy stat\tp-value\t'
'Number of permutations\nfoo\t42\t4\t0.0123456789\t0.12\t99\n')
obs = self.results.summary()
self.assertEqual(obs, exp)
if __name__ == '__main__':
main()
| bsd-3-clause |
mganeva/mantid | qt/applications/workbench/workbench/plotting/figuremanager.py | 1 | 14630 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2017 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
#
"""Provides our custom figure manager to wrap the canvas, window and our custom toolbar"""
from __future__ import (absolute_import, unicode_literals)
from functools import wraps
import sys
# 3rdparty imports
from mantid.api import AnalysisDataServiceObserver
from mantid.plots import MantidAxes
from mantid.py3compat import text_type
from mantidqt.plotting.figuretype import FigureType, figure_type
from mantidqt.widgets.fitpropertybrowser import FitPropertyBrowser
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import FigureManagerBase
from matplotlib.backends.backend_qt5agg import (FigureCanvasQTAgg) # noqa
from matplotlib.axes import Axes
from qtpy.QtCore import QObject, Qt
from qtpy.QtWidgets import QApplication, QLabel
# local imports
from .figureinteraction import FigureInteraction
from .figurewindow import FigureWindow
from .qappthreadcall import QAppThreadCall
from .toolbar import WorkbenchNavigationToolbar, ToolbarStateChecker
def _catch_exceptions(func):
"""
Catch all exceptions in method and print a traceback to stderr
"""
@wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception:
sys.stderr.write("Error occurred in handler:\n")
import traceback
traceback.print_exc()
return wrapper
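# A minimal sketch of how the decorator above is used on observer handlers: a
# decorated handler writes the traceback to stderr instead of letting the
# exception propagate back to the caller.
def _demo_catch_exceptions():
    @_catch_exceptions
    def flaky_handler():
        raise RuntimeError("handler failure")
    flaky_handler()   # traceback goes to stderr; nothing is raised
    return True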
class FigureManagerADSObserver(AnalysisDataServiceObserver):
def __init__(self, manager):
super(FigureManagerADSObserver, self).__init__()
self.window = manager.window
self.canvas = manager.canvas
self.observeClear(True)
self.observeDelete(True)
self.observeReplace(True)
@_catch_exceptions
def clearHandle(self):
"""
        Called when the ADS has deleted all of its workspaces
"""
self.window.emit_close()
@_catch_exceptions
def deleteHandle(self, _, workspace):
"""
        Called when the ADS has deleted a workspace. Checks the
        attached axes for any that hold a plot from this workspace. If removing
        these leaves the axes empty then the parent window is triggered to
        close.
:param _: The name of the workspace. Unused
:param workspace: A pointer to the workspace
"""
# Find the axes with this workspace reference
all_axes = self.canvas.figure.axes
if not all_axes:
return
# Here we wish to delete any curves linked to the workspace being
# deleted and if a figure is now empty, close it. We must avoid closing
# any figures that were created via the script window that are not
# managed via a workspace.
# See https://github.com/mantidproject/mantid/issues/25135.
empty_axes = []
for ax in all_axes:
if isinstance(ax, MantidAxes):
ax.remove_workspace_artists(workspace)
# We check for axes type below as a pseudo check for an axes being
# a colorbar. Creating a colorfill plot creates 2 axes: one linked
# to a workspace, the other a colorbar. Deleting the workspace
# deletes the colorfill, but the plot remains open due to the
# non-empty colorbar. This solution seems to work for the majority
# of cases but could lead to unmanaged figures only containing an
# Axes object being closed.
if type(ax) is not Axes:
empty_axes.append(MantidAxes.is_empty(ax))
if all(empty_axes):
self.window.emit_close()
else:
self.canvas.draw_idle()
@_catch_exceptions
def replaceHandle(self, _, workspace):
"""
Called when the ADS has replaced a workspace with one of the same name.
If this workspace is attached to this figure then its data is updated
:param _: The name of the workspace. Unused
:param workspace: A reference to the new workspace
"""
redraw = False
for ax in self.canvas.figure.axes:
if isinstance(ax, MantidAxes):
redraw_this = ax.replace_workspace_artists(workspace)
else:
continue
redraw = redraw | redraw_this
if redraw:
self.canvas.draw_idle()
class FigureManagerWorkbench(FigureManagerBase, QObject):
"""
Attributes
----------
canvas : `FigureCanvas`
The FigureCanvas instance
num : int or str
The Figure number
toolbar : qt.QToolBar
The qt.QToolBar
window : qt.QMainWindow
The qt.QMainWindow
"""
def __init__(self, canvas, num):
QObject.__init__(self)
FigureManagerBase.__init__(self, canvas, num)
# Patch show/destroy to be thread aware
self._destroy_orig = self.destroy
self.destroy = QAppThreadCall(self._destroy_orig)
self._show_orig = self.show
self.show = QAppThreadCall(self._show_orig)
self._window_activated_orig = self._window_activated
self._window_activated = QAppThreadCall(self._window_activated_orig)
self.set_window_title_orig = self.set_window_title
self.set_window_title = QAppThreadCall(self.set_window_title_orig)
self.fig_visibility_changed_orig = self.fig_visibility_changed
self.fig_visibility_changed = QAppThreadCall(self.fig_visibility_changed_orig)
self.window = FigureWindow(canvas)
self.window.activated.connect(self._window_activated)
self.window.closing.connect(canvas.close_event)
self.window.closing.connect(self.destroy)
self.window.visibility_changed.connect(self.fig_visibility_changed)
self.window.setWindowTitle("Figure %d" % num)
canvas.figure.set_label("Figure %d" % num)
# Give the keyboard focus to the figure instead of the
# manager; StrongFocus accepts both tab and click to focus and
        # will enable the canvas to process events w/o clicking.
        # ClickFocus only takes the focus if the window has been
# clicked
# on. http://qt-project.org/doc/qt-4.8/qt.html#FocusPolicy-enum or
# http://doc.qt.digia.com/qt/qt.html#FocusPolicy-enum
canvas.setFocusPolicy(Qt.StrongFocus)
canvas.setFocus()
self.window._destroying = False
# add text label to status bar
self.statusbar_label = QLabel()
self.window.statusBar().addWidget(self.statusbar_label)
self.toolbar = self._get_toolbar(canvas, self.window)
if self.toolbar is not None:
self.window.addToolBar(self.toolbar)
self.toolbar.message.connect(self.statusbar_label.setText)
self.toolbar.sig_grid_toggle_triggered.connect(self.grid_toggle)
self.toolbar.sig_toggle_fit_triggered.connect(self.fit_toggle)
self.toolbar.setFloatable(False)
tbs_height = self.toolbar.sizeHint().height()
else:
tbs_height = 0
# resize the main window so it will display the canvas with the
# requested size:
cs = canvas.sizeHint()
sbs = self.window.statusBar().sizeHint()
self._status_and_tool_height = tbs_height + sbs.height()
height = cs.height() + self._status_and_tool_height
self.window.resize(cs.width(), height)
self.fit_browser = FitPropertyBrowser(canvas, ToolbarStateChecker(self.toolbar))
self.fit_browser.closing.connect(self.handle_fit_browser_close)
self.window.setCentralWidget(canvas)
self.window.addDockWidget(Qt.LeftDockWidgetArea, self.fit_browser)
self.fit_browser.hide()
if matplotlib.is_interactive():
self.window.show()
canvas.draw_idle()
def notify_axes_change(fig):
# This will be called whenever the current axes is changed
if self.toolbar is not None:
self.toolbar.update()
canvas.figure.add_axobserver(notify_axes_change)
# Register canvas observers
        self._fig_interaction = FigureInteraction(self)
self._ads_observer = FigureManagerADSObserver(self)
# Plotted workspace names/spectra in form '{workspace}: spec {spec_num}'
self.workspace_labels = []
self.window.raise_()
def full_screen_toggle(self):
if self.window.isFullScreen():
self.window.showNormal()
else:
self.window.showFullScreen()
def _window_activated(self):
Gcf.set_active(self)
def _get_toolbar(self, canvas, parent):
return WorkbenchNavigationToolbar(canvas, parent, False)
def resize(self, width, height):
        """Set the canvas size in pixels."""
self.window.resize(width, height + self._status_and_tool_height)
def show(self): # noqa
self.window.show()
self.window.activateWindow()
self.window.raise_()
if self.window.windowState() & Qt.WindowMinimized:
# windowState() stores a combination of window state enums
# and multiple window states can be valid. On Windows
# a window can be both minimized and maximized at the
# same time, so we make a check here. For more info see:
# http://doc.qt.io/qt-5/qt.html#WindowState-enum
if self.window.windowState() & Qt.WindowMaximized:
self.window.setWindowState(Qt.WindowMaximized)
else:
self.window.setWindowState(Qt.WindowNoState)
# Hack to ensure the canvas is up to date
self.canvas.draw_idle()
if figure_type(self.canvas.figure) != FigureType.Line:
self._set_fit_enabled(False)
self._update_workspace_labels()
def destroy(self, *args):
# check for qApp first, as PySide deletes it in its atexit handler
if QApplication.instance() is None:
return
if self.window._destroying:
return
self.window._destroying = True
if self.toolbar:
self.toolbar.destroy()
self._ads_observer.observeAll(False)
del self._ads_observer
        self._fig_interaction.disconnect()
self.window.close()
try:
Gcf.destroy(self.num)
except AttributeError:
pass
# It seems that when the python session is killed,
# Gcf can get destroyed before the Gcf.destroy
# line is run, leading to a useless AttributeError.
def grid_toggle(self):
"""
Toggle grid lines on/off
"""
canvas = self.canvas
axes = canvas.figure.get_axes()
for ax in axes:
ax.grid()
canvas.draw_idle()
def fit_toggle(self):
"""
Toggle fit browser and tool on/off
"""
if self.fit_browser.isVisible():
self.fit_browser.hide()
else:
self.fit_browser.show()
def handle_fit_browser_close(self):
"""
Respond to a signal that user closed self.fit_browser by clicking the [x] button.
"""
self.toolbar.trigger_fit_toggle_action()
def hold(self):
"""
Mark this figure as held
"""
self.toolbar.hold()
def get_window_title(self):
return text_type(self.window.windowTitle())
def set_window_title(self, title):
self.window.setWindowTitle(title)
# We need to add a call to the figure manager here to call
# notify methods when a figure is renamed, to update our
# plot list.
Gcf.figure_title_changed(self.num)
# For the workbench we also keep the label in sync, this is
# to allow getting a handle as plt.figure('Figure Name')
self.canvas.figure.set_label(title)
def fig_visibility_changed(self):
"""
Make a notification in the global figure manager that
plot visibility was changed. This method is added to this
class so that it can be wrapped in a QAppThreadCall.
"""
Gcf.figure_visibility_changed(self.num)
def get_curve_labels(self):
"""Get curve labels from Matplotlib figure"""
return [line.get_label() for line in self.canvas.figure.axes[0].lines]
def _update_workspace_labels(self):
"""Check for new curves and update the workspace labels"""
curve_labels = self.get_curve_labels()
        num_new_curves = len(curve_labels) - len(self.workspace_labels)
        if num_new_curves > 0:
            # guard against slicing with -0, which would re-append every label
            self.workspace_labels += curve_labels[-num_new_curves:]
self._update_fit_browser_workspace_labels()
can_fit = self.fit_browser.can_fit_spectra(self.workspace_labels)
self._set_fit_enabled(can_fit)
def _update_fit_browser_workspace_labels(self):
self.fit_browser.workspace_labels = self.workspace_labels
def _set_fit_enabled(self, on):
action = self.toolbar._actions['toggle_fit']
action.setEnabled(on)
action.setVisible(on)
# -----------------------------------------------------------------------------
# Figure control
# -----------------------------------------------------------------------------
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
from matplotlib.figure import Figure # noqa
figure_class = kwargs.pop('FigureClass', Figure)
this_fig = figure_class(*args, **kwargs)
return new_figure_manager_given_figure(num, this_fig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasQTAgg(figure)
manager = FigureManagerWorkbench(canvas, num)
return manager
if __name__ == '__main__':
# testing code
import numpy as np
qapp = QApplication([' '])
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
qapp.setAttribute(Qt.AA_EnableHighDpiScaling, True)
x = np.linspace(0, 10 * np.pi, 1000)
cx, sx = np.cos(x), np.sin(x)
fig_mgr_1 = new_figure_manager(1)
fig1 = fig_mgr_1.canvas.figure
ax = fig1.add_subplot(111)
ax.set_title("Test title")
ax.set_xlabel(r"$\mu s$")
ax.set_ylabel("Counts")
ax.plot(x, cx)
fig1.show()
qapp.exec_()
| gpl-3.0 |
krez13/scikit-learn | sklearn/svm/tests/test_sparse.py | 22 | 13181 | from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
    # we use a subset of digits, as iris, blobs and make_classification did
    # not expose the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
    # Test decision_function
    # Sanity check: test that decision_function implemented in python
    # returns the same as the one in libsvm
# multi class:
svc = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo')
clf = svc.fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
    # This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
Srisai85/scipy | scipy/interpolate/tests/test_rbf.py | 41 | 4367 | #!/usr/bin/env python
# Created by John Travers, Robert Hetland, 2007
""" Test functions for rbf module """
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_, assert_array_almost_equal,
assert_almost_equal, run_module_suite)
from numpy import linspace, sin, random, exp, allclose
from scipy.interpolate.rbf import Rbf
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
'cubic', 'quintic', 'thin-plate', 'linear')
def check_rbf1d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (1D)
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=function)
yi = rbf(x)
assert_array_almost_equal(y, yi)
assert_almost_equal(rbf(float(x[0])), y[0])
def check_rbf2d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (2D).
x = random.rand(50,1)*4-2
y = random.rand(50,1)*4-2
z = x*exp(-x**2-1j*y**2)
rbf = Rbf(x, y, z, epsilon=2, function=function)
zi = rbf(x, y)
zi.shape = x.shape
assert_array_almost_equal(z, zi)
def check_rbf3d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (3D).
x = random.rand(50, 1)*4 - 2
y = random.rand(50, 1)*4 - 2
z = random.rand(50, 1)*4 - 2
d = x*exp(-x**2 - y**2)
rbf = Rbf(x, y, z, d, epsilon=2, function=function)
di = rbf(x, y, z)
di.shape = x.shape
assert_array_almost_equal(di, d)
def test_rbf_interpolation():
for function in FUNCTIONS:
yield check_rbf1d_interpolation, function
yield check_rbf2d_interpolation, function
yield check_rbf3d_interpolation, function
def check_rbf1d_regularity(function, atol):
# Check that the Rbf function approximates a smooth function well away
# from the nodes.
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, function=function)
xi = linspace(0, 10, 100)
yi = rbf(xi)
# import matplotlib.pyplot as plt
# plt.figure()
# plt.plot(x, y, 'o', xi, sin(xi), ':', xi, yi, '-')
# plt.plot(x, y, 'o', xi, yi-sin(xi), ':')
# plt.title(function)
# plt.show()
msg = "abs-diff: %f" % abs(yi - sin(xi)).max()
assert_(allclose(yi, sin(xi), atol=atol), msg)
def test_rbf_regularity():
tolerances = {
'multiquadric': 0.1,
'inverse multiquadric': 0.15,
'gaussian': 0.15,
'cubic': 0.15,
'quintic': 0.1,
'thin-plate': 0.1,
'linear': 0.2
}
for function in FUNCTIONS:
yield check_rbf1d_regularity, function, tolerances.get(function, 1e-2)
def check_rbf1d_stability(function):
# Check that the Rbf function with default epsilon is not subject
# to overshoot. Regression test for issue #4523.
#
# Generate some data (fixed random seed hence deterministic)
np.random.seed(1234)
x = np.linspace(0, 10, 50)
z = x + 4.0 * np.random.randn(len(x))
rbf = Rbf(x, z, function=function)
xi = np.linspace(0, 10, 1000)
yi = rbf(xi)
# subtract the linear trend and make sure there are no spikes
assert_(np.abs(yi-xi).max() / np.abs(z-x).max() < 1.1)
def test_rbf_stability():
for function in FUNCTIONS:
yield check_rbf1d_stability, function
def test_default_construction():
# Check that the Rbf class can be constructed with the default
# multiquadric basis function. Regression test for ticket #1228.
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_function_is_callable():
# Check that the Rbf class can be constructed with function=callable.
x = linspace(0,10,9)
y = sin(x)
linfunc = lambda x:x
rbf = Rbf(x, y, function=linfunc)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_two_arg_function_is_callable():
# Check that the Rbf class can be constructed with a two argument
# function=callable.
def _func(self, r):
return self.epsilon + r
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=_func)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_rbf_epsilon_none():
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, epsilon=None)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
icdishb/scikit-learn | sklearn/utils/tests/test_testing.py | 33 | 3783 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
# Check that the nose implementation of assert_greater gives the
# same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LDA()
tree = DecisionTreeClassifier()
# LDA doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
# `clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
anurag313/scikit-learn | sklearn/metrics/regression.py | 175 | 16953 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
multioutput : array-like or string in ['raw_values', 'uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
'uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
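# Illustrative (hypothetical) call showing the type detection performed above:
# >>> _check_reg_targets([1, 2], [1.5, 2.5], 'raw_values')[0]
# 'continuous'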
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
# passing None as weights to np.average() results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average',
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
Default value corresponds to 'variance_weighted', but
will be changed to 'uniform_average' in future versions.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
# arbitrary set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
# @FIXME change in 0.18
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value, it will be changed "
"to 'uniform_average' in 0.18.",
DeprecationWarning)
multioutput = 'variance_weighted'
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
# passing None as weights results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
| bsd-3-clause |
sprax/python | txt/sim_tfidf_nltk.py | 1 | 21467 | #!/usr/bin/env python3
'''Text similarity (between words, phrases, or short sentences) using NLTK'''
import heapq
import string
import time
import nltk
from sklearn.feature_extraction.text import TfidfVectorizer
import pdb
import qa_csv
import text_fio
STEMMER = nltk.stem.porter.PorterStemmer()
TRANS_NO_PUNCT = str.maketrans('', '', string.punctuation)
STOP_WORDS = nltk.corpus.stopwords.words('english')
# Most question words can also be used in qualifiers. It's not the word, but the usage we want.
QUERY_WORDS = ['how', 'what', 'when', 'where', 'which', 'who', 'why']
MOST_STOPS = [word for word in STOP_WORDS if word not in QUERY_WORDS]
def stem_tokens(tokens, stemmer=STEMMER):
'''list of stems, one per input token'''
return [stemmer.stem(item) for item in tokens]
def normalize(text, translation=TRANS_NO_PUNCT):
'''remove punctuation, lowercase, stem'''
return stem_tokens(nltk.word_tokenize(text.translate(translation).lower()))
VECTORIZER = TfidfVectorizer(tokenizer=normalize, stop_words='english')
VECT_NO_STOPS = TfidfVectorizer(tokenizer=normalize)
VECT_MOST_STOPS = TfidfVectorizer(tokenizer=normalize, stop_words=MOST_STOPS)
def remove_stop_words(tokens, stop_words=STOP_WORDS):
'''filter out stop words'''
return [tok for tok in tokens if tok not in stop_words]
def ident(obj):
'''identify function: just returns its argument'''
return obj
def first(obj):
'''first: returns the first item from an indexible, or failing that, just the object'''
try:
return obj.__getitem__(0)
except TypeError:
return obj
def second(obj):
'''second: returns second item from an indexible, or failing that, just the object'''
try:
return obj.__getitem__(1)
except TypeError:
return obj
def third(obj):
'''third: returns third item from an indexible, or failing that, just the object'''
try:
return obj.__getitem__(2)
except TypeError:
return obj
def cosine_sim_txt(txt_1, txt_2, vectorizer=VECTORIZER):
'''dot-product (projection) similarity'''
tfidf = vectorizer.fit_transform([txt_1, txt_2])
return ((tfidf * tfidf.T).A)[0, 1]
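# Note: TfidfVectorizer L2-normalises each row by default, so the (0, 1) entry of
# tfidf * tfidf.T is the cosine similarity of the two texts. Illustrative usage
# (exact values depend on the stemmer and stop-word list, so none are given here):
# >>> cosine_sim_txt('a little bird', 'a little bird chirps')  # shared terms -> > 0
# >>> cosine_sim_txt('a little bird', 'a big dog barks')       # no shared terms -> 0.0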
def sim_weighted_qas(one_quanda, other_quanda, get_question=second, get_answer=third, q_weight=0.5,
sim_func=cosine_sim_txt):
'''dot-product (projection) similarity combining similarities of questions and, if available, answers'''
assert 0.0 < q_weight and q_weight <= 1.0
q_sim = sim_func(get_question(one_quanda), get_question(other_quanda))
if q_weight < 1.0:
ans_1 = get_answer(one_quanda)
ans_2 = get_answer(other_quanda)
if ans_1 and ans_2:
try:
a_sim = sim_func(ans_1, ans_2)
return (q_sim - a_sim) * q_weight + a_sim
except ValueError as vex:
print("Error on answers (%s|%s): %s" % (ans_1, ans_2, vex))
raise vex
return q_sim
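# Note: (q_sim - a_sim) * q_weight + a_sim above is algebraically the linear
# interpolation q_weight * q_sim + (1 - q_weight) * a_sim, so q_weight=1.0 scores
# questions only and q_weight=0.5 averages question and answer similarity.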
def cosine_sim_quanda(one_quanda, other_quanda, get_question=second, get_answer=third, q_weight=0.5, vectorizer=VECTORIZER):
'''dot-product (projection) similarity combining similarities of questions and, if available, answers'''
assert 0 < q_weight and q_weight <= 1
q_sim = cosine_sim_txt(get_question(one_quanda), get_question(other_quanda), vectorizer)
if q_weight >= 1.0:
return q_sim
ans_1 = get_answer(one_quanda)
ans_2 = get_answer(other_quanda)
if ans_1 and ans_2:
try:
a_sim = cosine_sim_txt(ans_1, ans_2, vectorizer)
return (q_sim - a_sim) * q_weight + a_sim
except ValueError as vex:
print("Error on answers (%s|%s): %s" % (ans_1, ans_2, vex))
return q_sim
def cosine_sim_quanda_2(one_quanda, other_quanda, get_question=second, get_answer=third,
q_weight=0.5, vectorizer=VECT_NO_STOPS):
'''dot-product (projection) similarity combining similarities of questions
and, if available, answers'''
assert 0.0 < q_weight and q_weight <= 1.0
if q_weight >= 1.0:
print("Degenerate q_weight: ", q_weight)
return cosine_sim_txt(get_question(one_quanda), get_question(other_quanda), vectorizer)
# print("DBG CSQ: Q(%s) A(%s)" % (get_question(other_quanda), get_answer(other_quanda)))
qst_1 = get_question(one_quanda)
qst_2 = get_question(other_quanda)
ans_1 = get_answer(one_quanda)
ans_2 = get_answer(other_quanda)
try:
tfidf = vectorizer.fit_transform([qst_1, qst_2, ans_1, ans_2])
q_sim = ((tfidf * tfidf.T).A)[0, 1]
a_sim = ((tfidf * tfidf.T).A)[2, 3]
return (q_sim - a_sim) * q_weight + a_sim
except ValueError as vex:
print("Error, probably on answers (%s|%s): %s" % (ans_1, ans_2, vex))
return 0.0
def cosine_sim_quanda_ms(one_quanda, other_quanda, get_question=second, get_answer=third,
q_weight=0.5, vectorizer=VECT_MOST_STOPS):
'''Returns weighted Q & A similarity between two question-answer pairs'''
return cosine_sim_quanda_2(one_quanda, other_quanda, get_question, get_answer,
q_weight, vectorizer)
def smoke_test():
'''Tests that basic sentence similarity functionality works, or at least does not blow-up'''
sent_1 = 'a little bird'
sent_2 = 'a little bird chirps'
sent_3 = 'a big dog barks a lot'
print("cosine_sim_txt(%s, %s) == %f" % (sent_1, sent_1, cosine_sim_txt(sent_1, sent_1)))
print("cosine_sim_txt(%s, %s) == %f" % (sent_1, sent_2, cosine_sim_txt(sent_1, sent_2)))
print("cosine_sim_txt(%s, %s) == %f" % (sent_1, sent_3, cosine_sim_txt(sent_1, sent_3)))
questions = ['How is that fair?!', 'What is fair?!', 'When is the fair?', 'Where is the fair?',
'Who is fair?', 'Why is that fair?']
for qst in questions:
print("remove_stop_words(normalize(%s)) -> " % qst, remove_stop_words(normalize(qst)))
def nearest_known(saved_texts, input_text, similarity_func, threshold):
'''find text in saved_texts most similar to input_text, if similarity >= threshold'''
idx, sim = nearest_other_idx(saved_texts, input_text, similarity_func, threshold)
if idx < 0:
print("No saved text found more similar than %f" % threshold)
else:
print("Nearest at %f (%d) %s" % (sim, idx, saved_texts[idx]))
def nearest_other_idx(other_texts, quat, similarity_func, max_sim_val):
'''
Find the text in the other_texts array most similar to quat and return its index.
similarity_func: function returning the similarity between two texts (as in sentences)
vocab: the set of all known words
max_sim_val: the initial value of max, or the maximum similarity found so far.
'''
max_sim_idx = -9999
for idx, other_text in enumerate(other_texts):
sim = similarity_func(quat, other_text)
if max_sim_val < sim:
max_sim_val = sim
max_sim_idx = idx
return max_sim_idx, max_sim_val
def list_nearest_other_idx(texts, similarity_func=cosine_sim_txt):
'''
For each text in texts, find the index of the most similar other text.
Returns the list of indexes. The mapping is not necessarily 1-1, that is,
two texts may share a most similar other text.
similarity_func: function returning the similarity between two texts (as in sentences)
vocab: the set of all known words
'''
nearests = len(texts)*[None]
for idx, txt in enumerate(texts):
max_idx_0, max_sim_0 = nearest_other_idx(texts[:idx], txt, similarity_func, -1)
max_idx_1, max_sim_1 = nearest_other_idx(texts[idx+1:], txt, similarity_func, max_sim_0)
nearests[idx] = 1 + idx + max_idx_1 if max_sim_1 > max_sim_0 else max_idx_0
return nearests
def show_nearest_neighbors(texts, nearest_indexes=None):
'''print the most similar pairs'''
if nearest_indexes is None:
nearest_indexes = list_nearest_other_idx(texts)
for idx, txt in enumerate(texts):
nearest_idx = nearest_indexes[idx]
nearest_txt = texts[nearest_idx]
print(" %3d. T %s\n %3d. O %s\n" % (idx, txt, nearest_idx, nearest_txt))
###############################################################################
def similarity_dict(quandas, one_quanda, excludes=None, q_weight=1.0, sim_func=cosine_sim_txt,
min_sim_val=0.0, max_sim_val=1):
'''
Returns a dict mapping all_quandas' indexes to their similarity with quat,
provide their similarity value >= min_sim_val
similarity_func: function returning the similarity between two texts (as in sentences)
min_sim_val: similarity threshold
'''
if excludes is None:
excludes = []
sim_dict = {}
for idx, other_quanda in enumerate(quandas):
if idx in excludes:
continue
try:
sim = sim_weighted_qas(one_quanda, other_quanda, q_weight=q_weight, sim_func=sim_func)
if sim >= min_sim_val:
if sim > max_sim_val:
sim = max_sim_val
sim_dict[idx] = sim
except ValueError as ex:
print("Continuing past error at idx: %d (%s)" % (idx, ex))
return sim_dict
def nlargest_items_by_value(dict_with_comparable_values, count=10):
'''Returns a list of the N maximally valued items ((key, value) tuples) in descending order by value.'''
return heapq.nlargest(count, dict_with_comparable_values.items(), key=lambda item: (item[1], -item[0]))
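# Illustrative example with made-up values (ties on value fall back to the lower key):
# >>> nlargest_items_by_value({0: 0.2, 3: 0.9, 7: 0.9}, 2)
# [(3, 0.9), (7, 0.9)]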
def nlargest_keys_by_value(dict_with_comparable_values, count=10):
'''Returns a list of the keys to the greatest values, in descending order by value.'''
return heapq.nlargest(count, dict_with_comparable_values, key=dict_with_comparable_values.get)
def nlargest_values(dict_with_comparable_values, count=10):
'''Returns a list of the greatest values in descending order. Duplicates permitted.'''
return heapq.nlargest(count, dict_with_comparable_values.values())
def find_nearest_peers(all_quandas, quat, excludes=None, q_weight=1.0, sim_func=cosine_sim_txt, max_count=5, min_sim_val=0.0):
'''
Find the N most similar texts to quat and return a list of (index, similarity) pairs in
descending order of similarity.
all_quandas: the sentences or question-answer-tuples or whatever is to be compared.
quat: the one to compare against the rest
excludes: list of IDs to exclude from the comparison; e.g. quat.id if excluding self comparison.
similarity_func: function returning the similarity between two texts (as in sentences)
vocab: the set of all known words
max_count: maximum number of (index, similarity) pairs returned
min_sim_val: similarity threshold; pairs scoring below it are dropped.
'''
assert q_weight >= 0.0
print("FNP/fn={}: {}".format(sim_func, quat))
sim_dict = similarity_dict(all_quandas, quat, excludes, q_weight=q_weight, sim_func=sim_func, min_sim_val=min_sim_val)
return nlargest_items_by_value(sim_dict, max_count)
def find_nearest_peer_lists(quandas, q_weight=1.0, sim_func=cosine_sim_txt, max_count=5,
min_sim_val=0.0, id_eq_index=False):
'''
For each question-and-answer tuple in quandas, find a list of indexes of the most similar Q and A's.
Returns list of lists of items as in: [[(index, similariy), ...], ...]
similarity_func: function returning the similarity between two texts (as in sentences)
vocab: the set of all known words
'''
assert q_weight >= 0.0
nearests = len(quandas)*[None]
for idx, quanda in enumerate(quandas):
# consistency check:
idn = quanda[0]
assert isinstance(idn, int)
if id_eq_index:
# print("STN:FNPL: ", quanda)
if idx > 0 and idx + 100 != idn:
print("ERROR:", (idx + 100), "!=", quanda[0], "at", quanda)
raise IndexError
nearests[idx] = find_nearest_peers(quandas, quanda, [idx], q_weight=q_weight,
sim_func=sim_func, max_count=max_count,
min_sim_val=min_sim_val)
return nearests
def find_ranked_qa_lists_inclusive(quandas, q_weight=1.0, sim_func=cosine_sim_txt, max_count=5,
min_sim_val=0.0, id_eq_index=False):
return [find_nearest_peers(quandas, quanda, None, q_weight, sim_func,
max_count, min_sim_val) for quanda in quandas]
def find_ranked_qa_lists_verbose(quandas, exclude_self=True, q_weight=1.0, sim_func=cosine_sim_txt,
max_count=6, min_sim_val=0):
'''
Returns list of most similar lists. For each object in quandas, compute the similarity with all
(other) objects in quandas, and save at most max_count indices and similarity measures in descending
order of similarity, where similiary >= min_sim_val. If exclude_self is false, compare each object
with itself as well as the others (sanity check)
'''
ranked_lists = None
beg_time = time.time()
if exclude_self:
ranked_lists = find_nearest_peer_lists(quandas, q_weight, sim_func, max_count, min_sim_val, id_eq_index=False)
else:
ranked_lists = find_ranked_qa_lists_inclusive(quandas, q_weight, sim_func, max_count, min_sim_val, id_eq_index=True)
seconds = time.time() - beg_time
print("Finding all similarity lists (size=%d, count=%d) took %.1f seconds" % (len(quandas), max_count, seconds))
return ranked_lists
def show_most_sim_texts_list(texts, most_sim_lists=None):
'''print already-found similarity lists'''
if most_sim_lists is None:
most_sim_lists = find_ranked_qa_lists_verbose(texts) # use defaults
for idx, txt in enumerate(texts):
most_sim_list = most_sim_lists[idx]
print(" %3d. %s" % (idx, txt))
for oix, sim in most_sim_list:
print(" %3d %.5f %s" % (oix, sim, texts[oix]))
print()
return most_sim_lists
def distance_counts(quandas, most_sim_lists, max_dist):
'''
Returns a list of miss-distance counts: how many missed the gold standard by 0 (exact match),
how many missed it by one (as in the gold standard got the second-highest similarity score),
how many missed it by two, and so on up to max_dist. The total number of items with a gold standard
is added as the last element in the list.
'''
dist_counts = (max_dist + 1) * [0]
gold_scored = 0
for qax, sim_list in zip(quandas, most_sim_lists):
if len(qax) > 3 and qax[3]:
try:
gold = int(qax[3])
assert isinstance(gold, int)
except ValueError as ex:
print("ERROR on: ", qax, ex)
continue
gold_scored += 1
# ms = sim_list[0]
# msi = ms[0]
# sim = ms[1]
# print("DBG_F: Q_%d <==> Q_%d (%s <==> %s) first, %.4f (%s : %s)" % (int(qax[0]), msi, qax[1],
# quandas[msi][1], sim, remove_stop_words(normalize(qax[1])), remove_stop_words(normalize(quandas[msi][1]))))
for idx, item in enumerate(sim_list):
# print("DC: %d item(%d, %f)" % (idx, item[0], item[1]))
qax = quandas[item[0]]
if gold == qax[0]: # compare idn to idn (not idx)
# print("DBG_G: Q_%d <==> Q_%d (%s <==> %s) at %d, %.4f (%s : %s)\n" % (int(qax[0]), item[0], qax[1], quandas[item[0]][1],
# idx, item[1], remove_stop_words(normalize(qax[1])), remove_stop_words(normalize(quandas[item[0]][1]))))
dist_counts[idx] += 1
break
# save the number of gold standard matches as the last count in the list
dist_counts[max_dist] = gold_scored
return dist_counts
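# Illustrative (made-up) result for max_dist=6 and 10 gold-scored items:
# >>> distance_counts(quandas, most_sim_lists, 6)  # doctest: +SKIP
# [7, 2, 1, 0, 0, 0, 10]  # 7 exact matches, 2 where gold ranked 2nd, 1 ranked 3rd; 10 items had a gold id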
def score_distance_counts(dist_counts, weights):
'''Compute a score from miss-distance counts. The perfect score would be 1.0'''
assert len(weights) > 0 and weights[0] == 1.0
assert len(weights) < len(dist_counts)
gold_scored = dist_counts[-1]
# print("DBG SDC DCS:", dist_counts)
# print("DBG SDC WTS:", weights)
assert gold_scored > 0
score = dist_counts[0] # number of exact matches
for idx, weight in enumerate(weights[1:], 1):
assert weight <= weights[idx - 1]
# print("DBG SDC LOOP:", idx, dist_counts[idx])
score += weight * dist_counts[idx]
# print("DBG_SDC: score(%.4f) / %d == %f" % (score, gold_scored, score/gold_scored))
return score / gold_scored
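# Worked example, assuming dist_counts = [7, 2, 1, 0, 0, 0, 10] and the default
# weights [1.0, 0.8, 0.6, 0.4, 0.2, 0.1] used by score_most_sim_lists below:
# score = (7 + 0.8*2 + 0.6*1 + 0.4*0 + 0.2*0 + 0.1*0) / 10 = 0.92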
def score_most_sim_lists(quandas, most_sim_lists, weights=None):
'''Sum up gold-standard accuracy score'''
if weights is None:
weights = [1.0, 0.8, 0.6, 0.4, 0.2, 0.1]
dist_counts = distance_counts(quandas, most_sim_lists, len(weights))
return score_distance_counts(dist_counts, weights)
def save_most_sim_qa_lists_tsv(quandas, path, most_sim_lists, min_sim_val=0, sort_most_sim=True):
'''Save ranked most-similar lists to TSV file'''
isorted = None
sim_oix = []
if sort_most_sim:
# TODO: replace with zip
for idx, sim_list in enumerate(most_sim_lists):
print("MSL:", most_sim_lists)
if len(sim_list) > 0 and len(sim_list[0]) > 0:
max_oix = sim_list[0][0]
max_sim = sim_list[0][1]
sum_sim = sum([y[1] for y in sim_list])
sim_oix.append((max_sim, sum_sim, -idx, -max_oix))
else:
print("EMPTY SIM_LIST!")
# TODO: sorted with 2 keys?? FIXME: should sort on all keys!!
isorted = [-tup[2] for tup in sorted(sim_oix, reverse=True)]
else:
isorted = range(len(quandas))
print("ISORTED ", len(isorted), ": ", isorted)
out = text_fio.open_out_file(path)
mix = 0
for idx in isorted:
qax = quandas[idx]
idn = qax[0]
most_sim_list = most_sim_lists[idx]
lqax, ansr = len(qax), 'N/A'
if lqax < 3:
print("MISSING ANSWER at:", idx, qax[0], qax[1], "ANSWER:", ansr, sep="\t")
else:
ansr = qax[2]
gold = qax[3] if lqax > 3 else None
print(idn, qax[1], ansr, gold, sep="\t", file=out)
for oix, sim in most_sim_list: # Note: oix = other index, i.e., the index of the gold standard other QAS
if sim > min_sim_val:
try:
# print("IDX: ", idx, " OIX: ", oix, " SIM: ", sim)
quox = quandas[oix]
sidn = quox[0]
stxt = quox[1]
sans = quox[2] if len(quox) > 2 else 'N/A'
sgld = quox[3] if len(quox) > 3 else None
print("\t%3d\t%.5f\t%s\t%s\t%s" % (sidn, sim, stxt, sans, sgld), file=out)
except Exception as ex:
print("ERROR AT MIX {}: idx {} qax {}, err: {}\n".format(mix, idx, qax, ex))
# pass OR raise IndexError ??
# if sort_most_sim:
# print("TUP {:3}:\t {}".format(mix, sim_oix[idx]))
print(file=out)
mix += 1
if path != '-':
out.close()
# VECTORIZER (default): (size=201, count=6) took 96.1 seconds; score 0.8583
# VECT_MOST_STOPS (DEFAULT-QUERY_WORDS): (size=201, count=6) took 96.3 seconds; score 0.6635
# TODO: Why do the query words make the score worse?
# TEST: >>> match_tat(fair, sim_func=sim_wosc_nltk.sentence_similarity)
def match_tat(quandas, path="simlists.tsv", q_weight=1.0, sim_func=cosine_sim_txt,
max_count=6, min_sim_val=0, sort_most_sim=False):
'''Compute similarities using sim_func, score them against gold standard, and save
the list of similarity lists to TSV for further work. Many default values are
assumed, and the score is returned, not saved.'''
beg_time = time.time()
most_sim_lists = find_ranked_qa_lists_verbose(quandas, exclude_self=True, q_weight=q_weight,
sim_func=sim_func, max_count=max_count,
min_sim_val=min_sim_val)
score = score_most_sim_lists(quandas, most_sim_lists)
save_most_sim_qa_lists_tsv(quandas, path, most_sim_lists, min_sim_val=min_sim_val, sort_most_sim=sort_most_sim)
seconds = time.time() - beg_time
print("match_tat(size=%d, count=%d) took %.1f seconds; score %.4f" % (len(quandas), max_count,
seconds, score))
return score, most_sim_lists
###############################################################################
# >>> quandas = sc.csv_read_qa("simsilver.tsv", delimiter="\t")
# >>> score, msl = sn.match_tat(quandas, "simlists_sort.tsv", min_sim_val=0.00)
# Finding all similarity lists (size=309, count=6) took 218.1 seconds
# match_tat(size=309, count=6) took 218.1 seconds; score 0.5941
###############################################################################
def test_fair():
'''test similariy of QA pairs containing many stop words, including "fair"'''
fair = qa_csv.csv_read_qa('fair.txt', delimiter='\t')
score = match_tat(fair)
print("match_tat(fair) => %.3f" % score)
if __name__ == '__main__':
test_fair()
| lgpl-3.0 |
ArnaudKOPP/BioREST | BioREST/Psicquic.py | 1 | 40457 | # coding=utf-8
"""
Interface to the PSICQUIC web service
.. topic:: What is PSICQUIC ?
:URL: http://code.google.com/p/psicquic/
:REST: http://code.google.com/p/psicquic/wiki/PsicquicSpec_1_3_Rest
.. highlights::
"PSICQUIC is an effort from the HUPO Proteomics Standard Initiative
(HUPO-PSI) to standardise the access to molecular interaction databases
programmatically. The PSICQUIC View web interface shows that PSICQUIC
provides access to 25 active service "
-- Dec 2012
About queries
================
The idea behind PSICQUIC is to retrieve information related to protein
interactions from various databases. Note that protein interactions do not
necessarily mean protein-protein interactions. In order to be effective, the
query format has been standardised.
To do a search you can use the Molecular Interaction Query Language which is
based on Lucene's syntax. Here are some rules
* Use OR or space ' ' to search for ANY of the terms in a field
* Use AND if you want to search for those interactions where ALL of your terms are found
* Use quotes (") if you look for a specific phrase (group of terms that must
be searched together) or terms containing special characters that may otherwise
be interpreted by our query engine (eg. ':' in a GO term)
* Use parenthesis for complex queries (e.g. '(XXX OR YYY) AND ZZZ')
* Wildcards (`*`,?) can be used between letters in a term or at the end of terms to do fuzzy queries,
but never at the beginning of a term.
* Optionally, you can prepend a symbol in front of your term.
* + (plus): include this term. Equivalent to AND. e.g. +P12345
* - (minus): do not include this term. Equivalent to NOT. e.g. -P12345
* Nothing in front of the term. Equivalent to OR. e.g. P12345
* Implicit fields are used when no field is specified (simple search). For
instance, if you put 'P12345' in the simple query box, this will mean the same
as identifier:P12345 OR pubid:P12345 OR pubauth:P12345 OR species:P12345 OR
type:P12345 OR detmethod:P12345 OR interaction_id:P12345
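For instance, a query combining several of these rules could look like the
following (the identifiers are only illustrative placeholders)::

    (ZAP70 OR LCK) AND species:9606 AND detmethod:"two hybrid*"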
About the MITAB output
=========================
The output returned by a query contains a list of entries. Each entry is
formatted following the MITAB output.
Here below are listed the name of the field returned ordered as they would
appear in one entry. The first item is always idA whatever version of MITAB is
used. The version 25 of MITAB contains the first 15 fields in the table below.
Newer version may incude more fields but always include the 15 from MITAB 25 in
the same order. See the link from **irefindex**
`about mitab <http://irefindex.uio.no/wiki/README_MITAB2.6_for_iRefIndex_8.0#What_each_line_represents>`_
for more information.
=============== =========================================== =============== ======================
Field Name Searches on Implicit* Example
=============== =========================================== =============== ======================
idA Identifier A No idA:P74565
idB Identifier B No idB:P74565
id Identifiers (A or B) No id:P74565
alias Aliases (A or B) No alias:(KHDRBS1 HCK)
identifiers Identifiers and Aliases undistinctively Yes identifier:P74565
pubauth Publication 1st author(s) Yes pubauth:scott
pubid Publication Identifier(s) OR Yes pubid:(10837477 12029088)
taxidA Tax ID interactor A: the tax ID or
the species name No taxidA:mouse
taxidB Tax ID interactor B: the tax ID or
species name No taxidB:9606
species Species. Tax ID A or Tax ID B Yes species:human
type Interaction type(s) Yes type:"physical interaction"
detmethod Interaction Detection method(s) Yes detmethod:"two hybrid*"
interaction_id Interaction identifier(s) Yes interaction_id:EBI-761050
pbioroleA Biological role A Yes pbioroleA:ancillary
pbioroleB Biological role B Yes pbioroleB:"MI:0684"
pbiorole Biological roles (A or B) Yes pbiorole:enzyme
ptypeA Interactor type A Yes ptypeA:protein
ptypeB Interactor type B Yes ptypeB:"gene"
ptype Interactor types (A or B) Yes pbiorole:"small molecule"
pxrefA Interactor xref A (or Identifier A) Yes pxrefA:"GO:0003824"
pxrefB Interactor xref B (or Identifier B) Yes pxrefB:"GO:0003824"
pxref Interactor xrefs (A or B or Identifier
A or Identifier B) Yes pxref:"catalytic activity"
xref Interaction xrefs (or Interaction
identifiers) Yes xref:"nuclear pore"
annot Interaction annotations and tags Yes annot:"internally curated"
udate Update date Yes udate:[20100101 TO 20120101]
negative Negative interaction boolean Yes negative:true
complex Complex expansion Yes complex:"spoke expanded"
ftypeA Feature type of participant A Yes ftypeA:"sufficient to bind"
ftypeB Feature type of participant B Yes ftypeB:mutation
ftype Feature type of participant A or B Yes ftype:"binding site"
pmethodA Participant identification method A Yes pmethodA:"western blot"
pmethodB Participant identification method B Yes pmethodB:"sequence tag identification"
pmethod Participant identification methods
(A or B) Yes pmethod:immunostaining
stc Stoichiometry (A or B). Only true or
false, just to be able to filter
interaction having stoichiometry available Yes stc:true
param Interaction parameters. Only true or
false, just to be able to filter
interaction having parameters available Yes param:true
=============== =========================================== =============== ======================
# #### Psicquic REST TEST
# from BioREST import PSICQUIC
# p = PSICQUIC()
# p.print_status(full=True)
# print(p.activeDBs)
# p.retrieve("intact", "brca2", "tab27")
# p.retrieve("intact", "zap70", "xml25")
# p.retrieve("matrixdb", "*", "xml25")
# print(p.retrieve("string", "species:10090", firstresult=0, maxresults=100, output="tab25"))
# print(p.retrieve("biogrid", "ZAP70"))
# print(p.retrieve("biogrid", "ZAP70 AND species:10090"))
# res = p.retrieve("intact", "zap70")
# for x in res:
# print(x)
# print(p.get_db_properties('intact'))
# print(p.retrive_all("ZAP70 AND species:9606"))
"""
__author__ = "Arnaud KOPP"
__copyright__ = "© 2015-2016 KOPP Arnaud All Rights Reserved"
__credits__ = ["KOPP Arnaud"]
__license__ = "GNU GPL V3.0"
__maintainer__ = "Arnaud KOPP"
__email__ = "[email protected]"
__status__ = "Production"
import logging
from BioREST.Service import REST, check_param_in_list, RestServiceError
from BioREST.Uniprot import Uniprot
log = logging.getLogger(__name__)
class Psicquic(REST):
"""
Interface to the `PSICQUIC service
This service provides a common interface to more than 25 other services
related to proteins. So, we won't detail all the possibilities of this service.
Here is an example that consists of looking for interactors of the
protein ZAP70 within the IntAct database::
s = PSICQUIC()
res = s.query("intact", "zap70")
len(res) # there are 11 interactions found
11
for x in res[1]:
print(x)
uniprotkb:O95169
uniprotkb:P43403
intact:EBI-716238
intact:EBI-1211276
psi-mi:ndub8_human(display_long)|uniprotkb:NADH-ubiquinone oxidoreductase ASHI
Here we have a list of entries. There are 15 of them (depending on
the *output* parameter). The meaning of the entries is described on PSICQUIC
website: https://code.google.com/p/psicquic/wiki/MITAB25Format . In short:
#. Unique identifier for interactor A
#. Unique identifier for interactor B.
#. Alternative identifier for interactor A, for example the official gene symbol
#. Alternative identifier for interactor B.
#. Aliases for A, separated by "|".
#. Aliases for B.
#. Interaction detection methods, taken from the corresponding PSI-MI controlled vocabulary
#. First author surname(s) of the publication(s)
#. Identifier of the publication
#. NCBI Taxonomy identifier for interactor A.
#. NCBI Taxonomy identifier for interactor B.
#. Interaction types,
#. Source databases and identifiers,
#. Interaction identifier(s) i
#. Confidence score. Denoted as scoreType:value.
Another example with reactome database::
res = s.query("reactome", "Q9Y266")
.. warning:: PSICQUIC gives access to 25 other services. We cannot create
a dedicated parser for all of them. So, the :meth:`retrieve` method returns
the raw data. Additional classes may provide dedicated parsing in the
future.
"""
_formats = ["tab25", "tab26", "tab27", "xml25", "count", "biopax", "xgmml", "rdf-xml", "rdf-xml-abbrev", "rdf-n3",
"rdf-turtle"]
# note the typo in "genbank indentifier from bind DB
_mapping_uniprot = {"genbank indentifier": "P_GI",
'entrezgene/locuslink': "P_ENTREZGENEID",
'uniprotkb': "ACC+ID",
'rcsb pdb': "PDB_ID",
'ensembl': "ENSEMBL_ID",
'refseq': "P_REFSEQ_AC",
'hgnc': 'HGNC_ID',
"kegg": "KEGG_ID",
"entrez gene/locuslink": "P_ENTREZGENEID",
"chembl": "CHEMBL_ID",
"ddbj/embl/genbank": "EMBL_ID",
"dip": "DIP_ID",
"ensemblgenomes": "ENSEMBLGENOME_ID",
"omim": "MIM_ID",
"chebi": None,
"intact": None}
_retrieve_methods = ['interactor', 'interaction', 'query']
def __init__(self):
"""
Constructor
import PSICQUIC
s = PSICQUIC()
"""
super(Psicquic, self).__init__("PSICQUIC", url='http://www.ebi.ac.uk/Tools/webservices/psicquic')
self._registry = None
try:
self.uniprot = Uniprot()
except:
log.warning("UniProt service can't be initialised, needed for some parts")
self.buffer = {}
def _get_formats(self):
return Psicquic._formats
formats = property(_get_formats, doc="Returns the possible output formats")
def _get_active_db(self):
names = self.registry_names[:]
actives = self.registry_actives[:]
names = [x.lower() for x, y in zip(names, actives) if y == "true"]
return names
activeDBs = property(_get_active_db, doc="returns the active DBs only")
def read_registry(self):
"""
Reads and returns the active registry
"""
url = 'registry/registry?action=ACTIVE&format=txt'
res = self.http_get(url, frmt='txt')
return res.split()
def print_status(self, full=False):
"""
Prints the services that are available
:param full: print full information
:return: Nothing
The output is tabulated. The columns are:
* names
* active
* count
* version
* rest URL
* rest example
* restricted
"""
names = self.registry_names
counts = self.registry_counts
versions = self.registry_versions
actives = self.registry_actives
resturls = self.registry_resturls
restricted = self.registry_restricted
n = len(names)
for i in range(0, n):
if not full:
print("{0:15} {1:15} {2:15} {3}".format(names[i], actives[i], counts[i], versions[i]))
else:
print("%s\t %s\t %s\t %s\t %s \t %s\n" % (names[i], actives[i], counts[i], versions[i], resturls[i],
restricted[i]))
def __get_rest_url(self, service):
"""
Get the base REST url for the service
:param service: service name
:return: REST url
"""
names = [x.lower() for x in self.registry_names]
try:
index = names.index(service)
except ValueError:
raise ValueError("The service you gave (%s) is not registered. See self.registery_names" % service)
return self.registry_resturls[index]
def get_db_properties(self, service):
"""
Get db properties for the service
:param service:
:return:
"""
resturl = self.__get_rest_url(service)
url = resturl + 'properties'
res = self.http_get(url, frmt="txt")
if res is not None:
res = res.strip().split("\n")
return res
def get_all_db_properties(self, service=None):
"""
Get db properties for services
:param service:
:return: :raise ValueError:
"""
results = {}
if service is None:
service = [x.lower() for x in self.activeDBs]
for x in service:
if x not in self.activeDBs:
raise ValueError("database %s not in active databases" % x)
for name in service:
log.warning("Querying %s" % name)
res = self.get_db_properties(name)
results[name] = res
for name in service:
log.info("Found %s in %s" % (len(results[name]), name))
return results
def get_db_formats(self, service):
"""
Get db format for the service
:param service:
:return:
"""
resturl = self.__get_rest_url(service)
url = resturl + 'formats'
res = self.http_get(url, frmt="txt")
if res is not None:
res = res.strip().split("\n")
return res
def get_all_db_formats(self, service=None):
"""
Get db formats for services
:param service:
:return: :raise ValueError:
"""
results = {}
if service is None:
service = [x.lower() for x in self.activeDBs]
for x in service:
if x not in self.activeDBs:
raise ValueError("database %s not in active databases" % x)
for name in service:
log.warning("Querying %s" % name)
res = self.get_db_formats(name)
results[name] = res
for name in service:
log.info("Found %s in %s" % (len(results[name]), name))
return results
def get_db_version(self, service):
"""
Get db version for the service
:param service:
:return:
"""
resturl = self.__get_rest_url(service)
url = resturl + 'version'
res = self.http_get(url, frmt="txt")
if res is not None:
res = res.strip().split("\n")
return res
def get_all_db_version(self, service=None):
"""
Get db version for all services
:param service:
:return: :raise ValueError:
"""
results = {}
if service is None:
service = [x.lower() for x in self.activeDBs]
for x in service:
if x not in self.activeDBs:
raise ValueError("database %s not in active databases" % x)
for name in service:
log.warning("Querying %s" % name)
res = self.get_db_version(name)
results[name] = res
for name in service:
log.info("Found %s in %s" % (len(results[name]), name))
return results
def _get_registry(self):
if self._registry is None:
url = 'registry/registry?action=STATUS&format=xml'
res = self.http_get(url, frmt="xml")
res = self.easyXML(res)
self._registry = res
return self._registry
registry = property(_get_registry, doc="returns the registry of psicquic")
def _get_registry_names(self):
res = self.registry
return [x.findAll('name')[0].text for x in res.findAll("service")]
registry_names = property(_get_registry_names,
doc="returns all services available (names)")
def _get_registry_restricted(self):
res = self.registry
return [x.findAll('restricted')[0].text for x in res.findAll("service")]
registry_restricted = property(_get_registry_restricted,
doc="returns restricted status of services")
def _get_registry_resturl(self):
res = self.registry
data = [x.findAll('resturl')[0].text for x in res.findAll("service")]
return data
registry_resturls = property(_get_registry_resturl,
doc="returns URL of REST services")
def _get_registry_active(self):
res = self.registry
return [x.findAll('active')[0].text for x in res.findAll("service")]
registry_actives = property(_get_registry_active,
doc="returns active state of each service")
def _get_registry_count(self):
res = self.registry
return [x.findAll('count')[0].text for x in res.findAll("service")]
registry_counts = property(_get_registry_count,
doc="returns number of entries in each service")
def _get_registry_version(self):
res = self.registry
names = [x.findAll('name')[0].text for x in res.findAll("service")]
n = len(names)
version = [0] * n
for i in range(0, n):
x = res.findAll("service")[i]
if x.findAll("version"):
version[i] = x.findAll("version")[0].text
else:
version[i] = None
return version
registry_versions = property(_get_registry_version,
doc="returns version of each service")
@staticmethod
def _convert_tab2dict(data):
"""
https://code.google.com/p/psicquic/wiki/MITAB26Format
"""
results = []
for line in data:
results.append(line.split("\t"))
return results
def retrieve(self, service, query, methods='query', output="tab25", firstresult=None, maxresults=None,
compressed=True):
"""
Send a query to a specific database
:param methods: interaction, interactor or query
:param maxresults: max results
:param firstresult: pos of first result
:param str service: a registered service. See :attr:`registry_names`.
:param str query: a valid query. Can be `*` or a protein name.
:param str output: a valid format. See s._formats
:param compressed: gzipped or not data, speedup and requests unzipped auto
s.query("intact", "brca2", "tab27")
s.query("intact", "zap70", "xml25")
s.query("matrixdb", "*", "xml25")
This is the programmatic approach to this website:
http://www.ebi.ac.uk/Tools/webservices/psicquic/view/main.xhtml
Another example consist in accessing the *string* database for fetching
protein-protein interaction data of a particular model organism. Here we
restrict the query to 100 results::
s.query("string", "species:10090", firstResult=0, maxResults=100, output="tab25")
# spaces are automatically converted
s.query("biogrid", "ZAP70 AND species:9606")
.. warning:: AND must be in capital letters. Some databases are more permissive
than others (e.g., intact accepts "and"). species must be a valid taxonomy ID number. Again, some DBs are more
permissive and may accept the name (e.g., human)
To obtain the number of interactions in intact for the human species::
len(p.retrieve("intact", "species:9606"))
"""
if methods not in self._retrieve_methods:
raise ValueError("Retrieve methods {} don't exists".format(methods))
if service not in self.activeDBs:
raise ValueError("database %s not in active databases" % service)
params = {}
if output is not None:
check_param_in_list(output, self.formats)
params['format'] = output
else:
output = "none"
resturl = self.__get_rest_url(service)
if firstresult is not None:
params['firstResult'] = firstresult
if maxresults is not None:
params['maxResults'] = maxresults
if compressed:
params['compressed'] = 'y'
url = resturl + 'query/' + query
if "xml" in output:
res = self.http_get(url, frmt="xml", params=params)
else:
res = self.http_get(url, frmt="txt", params=params)
if res is not None:
res = res.strip().split("\n")
if output.startswith("tab"):
if res is not None:
res = self._convert_tab2dict(res)
return res
def retrieve_all(self, query, methods='query', databases=None, output="tab25", firstresult=None, maxresults=None,
compressed=True):
"""
Same as query but runs on all active database
:param methods: interaction, interactor or query
:param maxresults: max results
:param firstresult: pos of first result
:param output: a valid format. See s._formats
:param query: a valid query. Can be `*` or a protein name.
:param list databases: database to query. Queries all active DB if not provided
:param compressed: gzipped or not data, speedup and requests unzipped auto
:return: dictionary where keys correspond to databases and values to the output of the query.
res = s.queryAll("ZAP70 AND species:9606")
"""
if methods not in self._retrieve_methods:
raise ValueError("Retrieve methods {} don't exists".format(methods))
results = {}
if databases is None:
databases = [x.lower() for x in self.activeDBs]
for x in databases:
if x not in self.activeDBs:
raise ValueError("database %s not in active databases" % x)
for name in databases:
try:
res = self.retrieve(service=name, query=query, methods=methods, output=output, firstresult=firstresult,
maxresults=maxresults, compressed=compressed)
if output.startswith("tab25"):
results[name] = [x for x in res if x != [""]]
else:
import copy
results[name] = copy.copy(res)
log.info("Found %s items in %s" % (len(results[name]), name))
except RestServiceError:
log.warning("Service {} unavailable".format(name))
return results
def count_interaction(self, query):
"""
Returns a dictionary with database as key and results as values
:param str query: a valid query
:return: a dictionary which key as database and value as number of entries
Consider only the active database.
"""
activedbs = self.activeDBs[:]
        res = [(str(name), int(self.retrieve(name, query, output="count")[0])) for name in activedbs]
return dict(res)
@staticmethod
def get_name(data):
"""
:param data:
:return:
"""
idsa = [x[0] for x in data]
idsb = [x[1] for x in data]
return idsa, idsb
def know_name(self, data):
"""
        Scan all entries (MITAB) and return a simplified version
        Each item in the input list is a MITAB entry.
        The output is made of 2 lists corresponding to
        interactors A and B found in the MITAB entries.
        :param data:
        Elements in the input list take the following forms::
            DB1:ID1|DB2:ID2
            DB3:ID3
        The | sign separates equivalent IDs from different databases.
        We want to keep only one. The first known database is kept. If, in the list of DB:ID pairs, no known
        database is found, then we keep the first one whatsoever.
        Known databases are those available in the uniprot mapping tools.
        chembl and chebi IDs are kept unchanged.
"""
log.info("converting data into known names")
idsa = [x[0].replace("\"", "") for x in data]
idsb = [x[1].replace("\"", "") for x in data]
# extract the first and second ID but let us check if it is part of a
# known uniprot mapping.Otherwise no conversion will be possible.
# If so, we set the ID to "unknown"
# remove the " character that can be found in a few cases (e.g,
# chebi:"CHEBI:29036")
# idsA = [x.replace("chebi:CHEBI:","chebi:") for x in idsA]
# idsB = [x.replace("chebi:CHEBI:", "chebi:") for x in idsB]
# special case:
# in mint, there is an entry that ends with a | uniprotkb:P17844|
idsa = [x.strip("|") for x in idsa]
idsb = [x.strip("|") for x in idsb]
# the first ID
for i, entry in enumerate(idsa):
try:
dbs = [x.split(":")[0] for x in entry.split("|")]
ids = [x.split(":")[1] for x in entry.split("|")]
valid_dbs = [(db, ID) for db, ID in zip(dbs, ids) if db in self._mapping_uniprot.keys()]
# search for an existing DB
if len(valid_dbs) >= 1:
idsa[i] = valid_dbs[0][0] + ":" + valid_dbs[0][1]
else:
log.warning("None of the DB for this entry (%s) are available" % entry)
idsa[i] = "?" + dbs[0] + ":" + ids[0]
            except Exception:
log.warning("Could not extract name from %s" % entry)
idsa[i] = "??:" + entry # we add a : so that we are sure that a split(":") will work
# the second ID
for i, entry in enumerate(idsb):
try:
dbs = [x.split(":")[0] for x in entry.split("|")]
ids = [x.split(":")[1] for x in entry.split("|")]
valid_dbs = [(db, ID) for db, ID in zip(dbs, ids) if db in self._mapping_uniprot.keys()]
# search for an existing DB
if len(valid_dbs) >= 1:
idsb[i] = valid_dbs[0][0] + ":" + valid_dbs[0][1]
else:
log.warning("None of the DB (%s) for this entry are available" % entry)
idsb[i] = "?" + dbs[0] + ":" + ids[0]
            except Exception:
log.warning("Could not extract name from %s" % entry)
idsb[i] = "??:" + entry
counta = len([x for x in idsa if x.startswith("?")])
countb = len([x for x in idsb if x.startswith("?")])
if counta + countb > 0:
print(" %s ids out of %s were not identified" % (counta + countb, len(idsa) * 2))
print(set([x.split(":")[0] for x in idsa if x.startswith("?")]))
print(set([x.split(":")[0] for x in idsb if x.startswith("?")]))
log.info("\033[0;33m[WARNING]\033[0m knownName done")
return idsa, idsb
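    # Illustrative sketch (not part of the original code), assuming "uniprotkb"
    # is one of the known databases listed in self._mapping_uniprot:
    #
    #   idsa, idsb = self.know_name([["uniprotkb:P43403|intact:EBI-1211276",
    #                                 "uniprotkb:P15498"]])
    #   # idsa == ["uniprotkb:P43403"]   (first known database kept)
    #   # idsb == ["uniprotkb:P15498"]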
@staticmethod
def pre_cleaning(data):
"""
        Remove entries where IdA or IdB is set to "-"
:param data:
"""
ret = [x for x in data if x[0] != "-" and x[1] != "-"]
return ret
def post_cleaning_all(self, data, keep_only="HUMAN", flatten=True, verbose=True):
"""
        Even more cleaning by ignoring score, db and interaction:
        len(set([(x[0], x[1]) for x in retnew]))
:param verbose:
:param flatten:
:param keep_only:
:param data:
"""
results = {}
for k in data.keys():
log.warning("Post cleaning %s" % k)
            ret = self.post_cleaning(data[k], keep_only=keep_only)
if len(ret):
results[k] = ret
if flatten:
results = [x for k in results.keys() for x in results[k]]
return results
@staticmethod
def post_cleaning(data, keep_only="HUMAN", remove_db=["chebi", "chembl"], keep_self_loop=False):
"""
        Remove entries with a None and keep only those matching the keep_only pattern
        :param keep_self_loop:
        :param remove_db:
        :param keep_only:
        :param data:
"""
log.info("Before removing anything: ", len(data))
data = [x for x in data if x[0] is not None and x[1] is not None]
log.info("After removing the None: ", len(data))
data = [x for x in data if x[0].startswith("!") is False and x[1].startswith("!") is False]
log.info("After removing the !: ", len(data))
for db in remove_db:
data = [x for x in data if x[0].startswith(db) is False]
data = [x for x in data if x[1].startswith(db) is False]
log.info("After removing entries that match %s : " % db, len(data))
data = [x for x in data if keep_only in x[0] and keep_only in x[1]]
log.info("After removing entries that don't match %s : " % keep_only, len(data))
if keep_self_loop is False:
data = [x for x in data if x[0] != x[1]]
log.info("After removing self loop : ", len(data))
data = list(set(data))
log.info("After removing identical entries", len(data))
return data
def convert_all(self, data):
"""
:param data:
:return:
"""
results = {}
for k in data.keys():
log.info("Analysing %s" % k)
results[k] = self.convert(data[k], db=k)
return results
def convert(self, data, db=None):
"""
:param data:
:param db:
:return:
"""
log.info("Converting the database %s" % db)
idsa, idsb = self.know_name(data)
mapping = self.mapping_one_db(data)
results = []
for i, entry in enumerate(data):
x = idsa[i].split(":", 1)[1]
y = idsb[i].split(":", 1)[1]
xp = mapping[x]
yp = mapping[y]
            try:
                ref = entry[8]
            except IndexError:
                ref = "?"
            try:
                score = entry[14]
            except IndexError:
                score = "?"
            try:
                interaction = entry[11]
            except IndexError:
                interaction = "?"
results.append((xp, yp, score, interaction, ref, db))
return results
def mapping_one_db(self, data):
"""
:param data:
"""
query = {}
log.info("Converting IDs with proper DB name (knownName function)")
entriesa, entriesb = self.know_name(data) # idsA and B contains list of a single identifier of the form db:id
# the db is known from _mapping.uniprot otherwise it is called "unknown"
# get unique DBs to build the query dictionary
dbsa = [x.split(":")[0] for x in entriesa]
dbsb = [x.split(":")[0] for x in entriesb]
for x in set(dbsa):
query[x] = set()
for x in set(dbsb):
query[x] = set()
        for k in list(query.keys()):  # copy the keys since entries may be deleted in the loop
if k.startswith("?"):
del query[k]
# the data to store
mapping = {}
n = len(data)
# scan all entries
counter = 0
for entryA, entryB in zip(entriesa, entriesb):
counter += 1
            dba, ida = entryA.split(":", 1)
            try:
                dbb, idb = entryB.split(":", 1)
            except ValueError:
                log.warning("Could not parse entry %s; skipping it", entryB)
                continue
if ida not in mapping.keys():
if dba.startswith("?"):
mapping[ida] = entryA
else:
query[dba].add(ida)
if idb not in mapping.keys():
if dbb.startswith("?"):
mapping[idb] = entryB
else:
query[dbb].add(idb)
for k in query.keys():
if len(query[k]) > 2000 or counter == n:
this_query = list(query[k])
dbname = self._mapping_uniprot[k]
if dbname is not None:
log.info("Request sent to uniprot for %s database (%s/%s)" % (dbname, counter, n))
res = self.uniprot.mapping(fr=dbname, to="ID", query=" ".join(this_query))
for x in this_query:
if x not in res: # was not found
mapping[x] = "!" + k + ":" + x
else:
# we should be here since the queries are populated
# if not already in the mapping dictionary
if x not in res.keys():
raise ValueError(x)
if len(res[x]) == 1:
mapping[x] = res[x][0]
else:
log.info("Psicquic mapping found more than 1 id. keep first one")
mapping[x] = res[x][0]
else:
for x in this_query:
mapping[x] = k + ":" + x
query[k] = set()
for k in query.keys():
assert len(query[k]) == 0
return mapping
def __str__(self):
txt = self._get_active_db()
return txt
class AppsPPI(object):
"""
    This is an application based on PPI that searches for relevant interactions
    Interactions between proteins may have a score provided by each database.
    However, scores are sometimes omitted. Besides, they may have different
    meanings for different databases. Another way to score an interaction is to
    count in how many databases it is found.
    This class works as follows. First, you query a protein:
    p = AppsPPI()
    p.query_all("ZAP70 AND species:9606")
    This is going to call the PSICQUIC retrieve_all method to send this query to
    all active databases. Then, it calls the convert_all function to convert all
    interactor names into uniprot names if possible. If not, interactions are
    not taken into account. Finally, it removes duplicates and performs some
    cleaning inside the post_cleaning_all method.
    Then, you can call the summary method that counts the interactions. The
    count is stored in the attribute relevant_interactions.
    p.summary()
    Let us see how many interactions were found. The number of databases
    that contain at least one interaction is
    p.N
    p.relevant_interactions[p.N]
    [['ZAP70_HUMAN', 'DBNL_HUMAN']]
    So, there was 1 interaction found in all databases.
"""
def __init__(self, verbose=False):
self.psicquic = Psicquic()
self.verbose = verbose
self.counter = None
self.relevant_interactions = None
self.interactions = {}
self.N = None
self.results_query = None
def query_all(self, query, databases=None):
"""
:param str query: a valid query.
        :param list databases: by default, queries are sent to each active database.
            You can overwrite this behaviour by providing your own list of
            databases.
        :return: nothing, but the interactions attribute is populated with a
            dictionary where keys correspond to each database that returned a non empty list
            of interactions. The item for each key is a list of interactions containing the
            interactors A and B, the score, the type of interaction, the reference and the database name.
"""
        # e.g. self.results_query = self.psicquic.retrieve_all("ZAP70 AND species:9606")
        log.warning("Requests sent to psicquic. Can take a while, please be patient...")
        self.results_query = self.psicquic.retrieve_all(query, databases=databases)
self.interactions = self.psicquic.convert_all(self.results_query)
self.interactions = self.psicquic.post_cleaning_all(self.interactions,
flatten=False, verbose=self.verbose)
self.N = len(self.interactions.keys())
self.counter = {}
self.relevant_interactions = {}
def summary(self):
"""
        Build some summary related to the interactions found by query_all
        :return: nothing, but the relevant_interactions and counter attributes are populated
        p = AppsPPI()
        p.query_all("ZAP70 AND species:9606")
        p.summary()
"""
for k, v in self.interactions.items():
log.info("Found %s interactions within %s database" % (len(v), k))
counter = {}
for k in self.interactions.keys():
            # scan each database
for v in self.interactions[k]:
interaction = v[0] + "++" + v[1]
db = v[5]
if interaction in counter.keys():
counter[interaction].append(db)
else:
counter[interaction] = [db]
for k in counter.keys():
counter[k] = list(set(counter[k]))
n = len(self.interactions.keys())
print("-------------")
summ = {}
for i in range(1, n + 1):
res = [(x.split("++"), counter[x]) for x in counter.keys() if len(counter[x]) == i]
log.info("Found %s interactions in %s common databases" % (len(res), i))
res = [x.split("++") for x in counter.keys() if len(counter[x]) == i]
if len(res):
summ[i] = [x for x in res]
else:
summ[i] = []
self.counter = counter.copy()
self.relevant_interactions = summ.copy()
def get_reference(self, ida, idb):
"""
:param ida:
:param idb:
"""
key = ida + "++" + idb
uniq = len(self.counter[key])
ret = [x for k in self.interactions.keys() for x in self.interactions[k] if x[0] == ida and x[1] == idb]
N = len(ret)
log.info("Interactions %s -- %s has %s entries in %s databases (%s):" % (ida, idb, N, uniq, self.counter[key]))
for r in ret:
print(r[5], " reference", r[4])
def show_pie(self):
"""
a simple example to demonstrate how to visualise number of
interactions found in various databases
"""
try:
from pylab import pie, clf, title, show, legend
except ImportError:
raise ImportError("You must install pylab/matplotlib to use this functionality")
labels = range(1, self.N + 1)
print(labels)
counting = [len(self.relevant_interactions[i]) for i in labels]
clf()
# pie(counting, labels=[str(int(x)) for x in labels], shadow=True)
pie(counting, labels=[str(x) for x in counting], shadow=True)
title("Number of interactions found in N databases")
legend([str(x) + " database(s)" for x in labels])
show()
| gpl-3.0 |
anntzer/scikit-learn | examples/inspection/plot_linear_model_coefficient_interpretation.py | 9 | 23016 | """
==================================================================
Common pitfalls in interpretation of coefficients of linear models
==================================================================
In linear models, the target value is modeled as
a linear combination of the features (see the :ref:`linear_model` User Guide
section for a description of a set of linear models available in
scikit-learn).
Coefficients in multiple linear models represent the relationship between the
given feature, :math:`X_i` and the target, :math:`y`, assuming that all the
other features remain constant (`conditional dependence
<https://en.wikipedia.org/wiki/Conditional_dependence>`_).
This is different from plotting :math:`X_i` versus :math:`y` and fitting a
linear relationship: in that case all possible values of the other features are
taken into account in the estimation (marginal dependence).
This example will provide some hints in interpreting coefficients in linear
models, pointing at problems that arise when either the linear model is not
appropriate to describe the dataset, or when features are correlated.
We will use data from the `"Current Population Survey"
<https://www.openml.org/d/534>`_ from 1985 to predict
wage as a function of various features such as experience, age, or education.
.. contents::
:local:
:depth: 1
"""
print(__doc__)
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %%
# The dataset: wages
# ------------------
#
# We fetch the data from `OpenML <http://openml.org/>`_.
# Note that setting the parameter `as_frame` to True will retrieve the data
# as a pandas dataframe.
from sklearn.datasets import fetch_openml
survey = fetch_openml(data_id=534, as_frame=True)
# %%
# Then, we identify features `X` and targets `y`: the column WAGE is our
# target variable (i.e., the variable which we want to predict).
#
X = survey.data[survey.feature_names]
X.describe(include="all")
# %%
# Note that the dataset contains categorical and numerical variables.
# We will need to take this into account when preprocessing the dataset
# thereafter.
X.head()
# %%
# Our target for prediction: the wage.
# Wages are described as floating-point numbers in dollars per hour.
y = survey.target.values.ravel()
survey.target.head()
# %%
# We split the sample into a train and a test dataset.
# Only the train dataset will be used in the following exploratory analysis.
# This is a way to emulate a real situation where predictions are performed on
# an unknown target, and we don't want our analysis and decisions to be biased
# by our knowledge of the test data.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=42
)
# %%
# First, let's get some insights by looking at the variable distributions and
# at the pairwise relationships between them. Only numerical
# variables will be used. In the following plot, each dot represents a sample.
#
# .. _marginal_dependencies:
train_dataset = X_train.copy()
train_dataset.insert(0, "WAGE", y_train)
_ = sns.pairplot(train_dataset, kind='reg', diag_kind='kde')
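# %%
# As a complementary, purely illustrative check (not required by the rest of
# the analysis), we can quantify the pairwise linear correlations between the
# numerical variables; this backs up the visual impression from the pairplot.
train_dataset[["AGE", "EXPERIENCE", "EDUCATION", "WAGE"]].corr()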
# %%
# Looking closely at the WAGE distribution reveals that it has a
# long tail. For this reason, we should take its logarithm
# to turn it approximately into a normal distribution (linear models such
# as ridge or lasso work best for a normal distribution of error).
#
# The WAGE is increasing when EDUCATION is increasing.
# Note that the dependence between WAGE and EDUCATION
# represented here is a marginal dependence, i.e., it describes the behavior
# of a specific variable without keeping the others fixed.
#
# Also, the EXPERIENCE and AGE are strongly linearly correlated.
#
# .. _the-pipeline:
#
# The machine-learning pipeline
# -----------------------------
#
# To design our machine-learning pipeline, we first manually
# check the type of data that we are dealing with:
survey.data.info()
# %%
# As seen previously, the dataset contains columns with different data types
# and we need to apply a specific preprocessing for each data types.
# In particular categorical variables cannot be included in a linear model if not
# coded as integers first. In addition, to avoid categorical features being
# treated as ordered values, we need to one-hot-encode them.
# Our pre-processor will
#
# - one-hot encode (i.e., generate a column by category) the categorical
# columns;
# - as a first approach (we will see later how the normalisation of numerical
# values will affect our discussion), keep numerical values as they are.
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import OneHotEncoder
categorical_columns = ['RACE', 'OCCUPATION', 'SECTOR',
'MARR', 'UNION', 'SEX', 'SOUTH']
numerical_columns = ['EDUCATION', 'EXPERIENCE', 'AGE']
preprocessor = make_column_transformer(
(OneHotEncoder(drop='if_binary'), categorical_columns),
remainder='passthrough'
)
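# %%
# As an optional sanity check (an illustrative addition), we can fit the
# preprocessor alone and inspect the shape of the design matrix it produces:
# one column per category of each one-hot-encoded feature plus the
# passed-through numerical columns.
preprocessor.fit(X_train).transform(X_train).shape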
# %%
# To describe the dataset as a linear model we use a ridge regressor
# with a very small regularization, and we model the logarithm of the WAGE.
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import Ridge
from sklearn.compose import TransformedTargetRegressor
model = make_pipeline(
preprocessor,
TransformedTargetRegressor(
regressor=Ridge(alpha=1e-10),
func=np.log10,
inverse_func=sp.special.exp10
)
)
# %%
# Processing the dataset
# ----------------------
#
# First, we fit the model.
_ = model.fit(X_train, y_train)
# %%
# Then we check the performance of the computed model plotting its predictions
# on the test set and computing,
# for example, the median absolute error of the model.
from sklearn.metrics import median_absolute_error
y_pred = model.predict(X_train)
mae = median_absolute_error(y_train, y_pred)
string_score = f'MAE on training set: {mae:.2f} $/hour'
y_pred = model.predict(X_test)
mae = median_absolute_error(y_test, y_pred)
string_score += f'\nMAE on testing set: {mae:.2f} $/hour'
fig, ax = plt.subplots(figsize=(5, 5))
plt.scatter(y_test, y_pred)
ax.plot([0, 1], [0, 1], transform=ax.transAxes, ls="--", c="red")
plt.text(3, 20, string_score)
plt.title('Ridge model, small regularization')
plt.ylabel('Model predictions')
plt.xlabel('Truths')
plt.xlim([0, 27])
_ = plt.ylim([0, 27])
# %%
# The model learnt is far from being a good model making accurate predictions:
# this is obvious when looking at the plot above, where good predictions
# should lie on the red line.
#
# In the following section, we will interpret the coefficients of the model.
# While we do so, we should keep in mind that any conclusion we draw is
# about the model that we build, rather than about the true (real-world)
# generative process of the data.
#
# Interpreting coefficients: scale matters
# ---------------------------------------------
#
# First of all, we can take a look to the values of the coefficients of the
# regressor we have fitted.
feature_names = (model.named_steps['columntransformer']
.named_transformers_['onehotencoder']
.get_feature_names(input_features=categorical_columns))
feature_names = np.concatenate(
[feature_names, numerical_columns])
coefs = pd.DataFrame(
model.named_steps['transformedtargetregressor'].regressor_.coef_,
columns=['Coefficients'], index=feature_names
)
coefs
# %%
# The AGE coefficient is expressed in "dollars/hour per living years" while the
# EDUCATION one is expressed in "dollars/hour per years of education". This
# representation of the coefficients has the benefit of making clear the
# practical predictions of the model: an increase of :math:`1` year in AGE
# means a decrease of :math:`0.030867` dollars/hour, while an increase of
# :math:`1` year in EDUCATION means an increase of :math:`0.054699`
# dollars/hour. On the other hand, categorical variables (such as UNION or SEX)
# are dimensionless numbers taking either the value 0 or 1. Their coefficients
# are expressed in dollars/hour. Then, we cannot compare the magnitude of
# different coefficients since the features have different natural scales, and
# hence value ranges, because of their different unit of measure. This is more
# visible if we plot the coefficients.
coefs.plot(kind='barh', figsize=(9, 7))
plt.title('Ridge model, small regularization')
plt.axvline(x=0, color='.5')
plt.subplots_adjust(left=.3)
# %%
# Indeed, from the plot above the most important factor in determining WAGE
# appears to be the
# variable UNION, even if our intuition might tell us that variables
# like EXPERIENCE should have more impact.
#
# Looking at the coefficient plot to gauge feature importance can be
# misleading as some of them vary on a small scale, while others, like AGE,
# vary a lot more, over several decades.
#
# This is visible if we compare the standard deviations of different
# features.
X_train_preprocessed = pd.DataFrame(
model.named_steps['columntransformer'].transform(X_train),
columns=feature_names
)
X_train_preprocessed.std(axis=0).plot(kind='barh', figsize=(9, 7))
plt.title('Features std. dev.')
plt.subplots_adjust(left=.3)
# %%
# Multiplying the coefficients by the standard deviation of the related
# feature would reduce all the coefficients to the same unit of measure.
# As we will see :ref:`after<scaling_num>` this is equivalent to normalizing
# numerical variables to their standard deviation,
# as :math:`y = \sum{coef_i \times X_i} =
# \sum{(coef_i \times std_i) \times (X_i / std_i)}`.
#
# In that way, we emphasize that the
# greater the variance of a feature, the larger the weight of the corresponding
# coefficient on the output, all else being equal.
coefs = pd.DataFrame(
model.named_steps['transformedtargetregressor'].regressor_.coef_ *
X_train_preprocessed.std(axis=0),
columns=['Coefficient importance'], index=feature_names
)
coefs.plot(kind='barh', figsize=(9, 7))
plt.title('Ridge model, small regularization')
plt.axvline(x=0, color='.5')
plt.subplots_adjust(left=.3)
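# %%
# A compact, illustrative view of the same information: the scaled
# coefficients sorted by absolute value, from most to least influential.
coefs["Coefficient importance"].abs().sort_values(ascending=False)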
# %%
# Now that the coefficients have been scaled, we can safely compare them.
#
# .. warning::
#
# Why does the plot above suggest that an increase in age leads to a
#   decrease in wage? Why is the :ref:`initial pairplot
#   <marginal_dependencies>` telling the opposite?
#
# The plot above tells us about dependencies between a specific feature and
# the target when all other features remain constant, i.e., **conditional
# dependencies**. An increase of the AGE will induce a decrease
# of the WAGE when all other features remain constant. On the contrary, an
# increase of the EXPERIENCE will induce an increase of the WAGE when all
# other features remain constant.
# Also, AGE, EXPERIENCE and EDUCATION are the three variables that most
# influence the model.
#
# Checking the variability of the coefficients
# --------------------------------------------
#
# We can check the coefficient variability through cross-validation:
# it is a form of data perturbation (related to
# `resampling <https://en.wikipedia.org/wiki/Resampling_(statistics)>`_).
#
# If coefficients vary significantly when changing the input dataset
# their robustness is not guaranteed, and they should probably be interpreted
# with caution.
from sklearn.model_selection import cross_validate
from sklearn.model_selection import RepeatedKFold
cv_model = cross_validate(
model, X, y, cv=RepeatedKFold(n_splits=5, n_repeats=5),
return_estimator=True, n_jobs=-1
)
coefs = pd.DataFrame(
[est.named_steps['transformedtargetregressor'].regressor_.coef_ *
X_train_preprocessed.std(axis=0)
for est in cv_model['estimator']],
columns=feature_names
)
plt.figure(figsize=(9, 7))
sns.stripplot(data=coefs, orient='h', color='k', alpha=0.5)
sns.boxplot(data=coefs, orient='h', color='cyan', saturation=0.5)
plt.axvline(x=0, color='.5')
plt.xlabel('Coefficient importance')
plt.title('Coefficient importance and its variability')
plt.subplots_adjust(left=.3)
# %%
# The problem of correlated variables
# -----------------------------------
#
# The AGE and EXPERIENCE coefficients are affected by strong variability which
# might be due to the collinearity between the 2 features: as AGE and
# EXPERIENCE vary together in the data, their effect is difficult to tease
# apart.
#
# To verify this interpretation we plot the variability of the AGE and
# EXPERIENCE coefficient.
#
# .. _covariation:
plt.ylabel('Age coefficient')
plt.xlabel('Experience coefficient')
plt.grid(True)
plt.xlim(-0.4, 0.5)
plt.ylim(-0.4, 0.5)
plt.scatter(coefs["AGE"], coefs["EXPERIENCE"])
_ = plt.title('Co-variations of coefficients for AGE and EXPERIENCE '
'across folds')
# %%
# Two regions are populated: when the EXPERIENCE coefficient is
# positive the AGE one is negative and vice versa.
#
# To go further we remove one of the 2 features and check what is the impact
# on the model stability.
column_to_drop = ['AGE']
cv_model = cross_validate(
model, X.drop(columns=column_to_drop), y,
cv=RepeatedKFold(n_splits=5, n_repeats=5),
return_estimator=True, n_jobs=-1
)
coefs = pd.DataFrame(
[est.named_steps['transformedtargetregressor'].regressor_.coef_ *
X_train_preprocessed.drop(columns=column_to_drop).std(axis=0)
for est in cv_model['estimator']],
columns=feature_names[:-1]
)
plt.figure(figsize=(9, 7))
sns.stripplot(data=coefs, orient='h', color='k', alpha=0.5)
sns.boxplot(data=coefs, orient='h', color='cyan', saturation=0.5)
plt.axvline(x=0, color='.5')
plt.title('Coefficient importance and its variability')
plt.xlabel('Coefficient importance')
plt.subplots_adjust(left=.3)
# %%
# The estimation of the EXPERIENCE coefficient is now less variable and
# remains important for all models trained during cross-validation.
#
# .. _scaling_num:
#
# Preprocessing numerical variables
# ---------------------------------
#
# As said above (see ":ref:`the-pipeline`"), we could also choose to scale
# numerical values before training the model.
# This can be useful to apply a similar amount of regularization to all of them
# in the Ridge.
# The preprocessor is redefined in order to subtract the mean and scale
# variables to unit variance.
from sklearn.preprocessing import StandardScaler
preprocessor = make_column_transformer(
(OneHotEncoder(drop='if_binary'), categorical_columns),
(StandardScaler(), numerical_columns),
remainder='passthrough'
)
# %%
# The model will stay unchanged.
model = make_pipeline(
preprocessor,
TransformedTargetRegressor(
regressor=Ridge(alpha=1e-10),
func=np.log10,
inverse_func=sp.special.exp10
)
)
_ = model.fit(X_train, y_train)
# %%
# Again, we check the performance of the computed
# model using, for example, the median absolute error of the model and the R
# squared coefficient.
y_pred = model.predict(X_train)
mae = median_absolute_error(y_train, y_pred)
string_score = f'MAE on training set: {mae:.2f} $/hour'
y_pred = model.predict(X_test)
mae = median_absolute_error(y_test, y_pred)
string_score += f'\nMAE on testing set: {mae:.2f} $/hour'
fig, ax = plt.subplots(figsize=(6, 6))
plt.scatter(y_test, y_pred)
ax.plot([0, 1], [0, 1], transform=ax.transAxes, ls="--", c="red")
plt.text(3, 20, string_score)
plt.title('Ridge model, small regularization, normalized variables')
plt.ylabel('Model predictions')
plt.xlabel('Truths')
plt.xlim([0, 27])
_ = plt.ylim([0, 27])
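# %%
# The R squared coefficient mentioned above can be computed as follows (an
# illustrative addition); like the median absolute error, it indicates that
# the model explains only part of the variability of the wage.
from sklearn.metrics import r2_score
r2_score(y_test, model.predict(X_test))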
# %%
# For the coefficient analysis, scaling is not needed this time.
coefs = pd.DataFrame(
model.named_steps['transformedtargetregressor'].regressor_.coef_,
columns=['Coefficients'], index=feature_names
)
coefs.plot(kind='barh', figsize=(9, 7))
plt.title('Ridge model, small regularization, normalized variables')
plt.axvline(x=0, color='.5')
plt.subplots_adjust(left=.3)
# %%
# We now inspect the coefficients across several cross-validation folds.
cv_model = cross_validate(
model, X, y, cv=RepeatedKFold(n_splits=5, n_repeats=5),
return_estimator=True, n_jobs=-1
)
coefs = pd.DataFrame(
[est.named_steps['transformedtargetregressor'].regressor_.coef_
for est in cv_model['estimator']],
columns=feature_names
)
plt.figure(figsize=(9, 7))
sns.stripplot(data=coefs, orient='h', color='k', alpha=0.5)
sns.boxplot(data=coefs, orient='h', color='cyan', saturation=0.5)
plt.axvline(x=0, color='.5')
plt.title('Coefficient variability')
plt.subplots_adjust(left=.3)
# %%
# The result is quite similar to the non-normalized case.
#
# Linear models with regularization
# ---------------------------------
#
# In machine-learning practice, Ridge Regression is more often used with
# non-negligible regularization.
#
# Above, we limited this regularization to a very small amount.
# Regularization improves the conditioning of the problem and reduces the
# variance of the estimates. RidgeCV applies cross validation in order to
# determine which value of the regularization parameter (`alpha`) is best
# suited for prediction.
from sklearn.linear_model import RidgeCV
model = make_pipeline(
preprocessor,
TransformedTargetRegressor(
regressor=RidgeCV(alphas=np.logspace(-10, 10, 21)),
func=np.log10,
inverse_func=sp.special.exp10
)
)
_ = model.fit(X_train, y_train)
# %%
# First we check which value of :math:`\alpha` has been selected.
model[-1].regressor_.alpha_
# %%
# Then we check the quality of the predictions.
y_pred = model.predict(X_train)
mae = median_absolute_error(y_train, y_pred)
string_score = f'MAE on training set: {mae:.2f} $/hour'
y_pred = model.predict(X_test)
mae = median_absolute_error(y_test, y_pred)
string_score += f'\nMAE on testing set: {mae:.2f} $/hour'
fig, ax = plt.subplots(figsize=(6, 6))
plt.scatter(y_test, y_pred)
ax.plot([0, 1], [0, 1], transform=ax.transAxes, ls="--", c="red")
plt.text(3, 20, string_score)
plt.title('Ridge model, regularization, normalized variables')
plt.ylabel('Model predictions')
plt.xlabel('Truths')
plt.xlim([0, 27])
_ = plt.ylim([0, 27])
# %%
# The ability of the regularized model to reproduce the data is similar to
# that of the non-regularized model.
coefs = pd.DataFrame(
model.named_steps['transformedtargetregressor'].regressor_.coef_,
columns=['Coefficients'], index=feature_names
)
coefs.plot(kind='barh', figsize=(9, 7))
plt.title('Ridge model, regularization, normalized variables')
plt.axvline(x=0, color='.5')
plt.subplots_adjust(left=.3)
# %%
# The coefficients are significantly different.
# AGE and EXPERIENCE coefficients are both positive but they now have less
# influence on the prediction.
#
# The regularization reduces the influence of correlated
# variables on the model because the weight is shared between the two
# predictive variables, so neither alone would have strong weights.
#
# On the other hand, the weights obtained with regularization are more
# stable (see the :ref:`ridge_regression` User Guide section). This
# increased stability is visible from the plot, obtained from data
# perturbations, in a cross validation. This plot can be compared with
# the :ref:`previous one<covariation>`.
cv_model = cross_validate(
model, X, y, cv=RepeatedKFold(n_splits=5, n_repeats=5),
return_estimator=True, n_jobs=-1
)
coefs = pd.DataFrame(
[est.named_steps['transformedtargetregressor'].regressor_.coef_ *
X_train_preprocessed.std(axis=0)
for est in cv_model['estimator']],
columns=feature_names
)
plt.ylabel('Age coefficient')
plt.xlabel('Experience coefficient')
plt.grid(True)
plt.xlim(-0.4, 0.5)
plt.ylim(-0.4, 0.5)
plt.scatter(coefs["AGE"], coefs["EXPERIENCE"])
_ = plt.title('Co-variations of coefficients for AGE and EXPERIENCE '
'across folds')
# %%
# Linear models with sparse coefficients
# --------------------------------------
#
# Another possibility to take into account correlated variables in the dataset
# is to estimate sparse coefficients. In some way we already did it manually
# when we dropped the AGE column in a previous Ridge estimation.
#
# Lasso models (see the :ref:`lasso` User Guide section) estimate sparse
# coefficients. LassoCV applies cross validation in order to
# determine which value of the regularization parameter (`alpha`) is best
# suited for the model estimation.
from sklearn.linear_model import LassoCV
model = make_pipeline(
preprocessor,
TransformedTargetRegressor(
regressor=LassoCV(alphas=np.logspace(-10, 10, 21), max_iter=100000),
func=np.log10,
inverse_func=sp.special.exp10
)
)
_ = model.fit(X_train, y_train)
# %%
# First we verify which value of :math:`\alpha` has been selected.
model[-1].regressor_.alpha_
# %%
# Then we check the quality of the predictions.
y_pred = model.predict(X_train)
mae = median_absolute_error(y_train, y_pred)
string_score = f'MAE on training set: {mae:.2f} $/hour'
y_pred = model.predict(X_test)
mae = median_absolute_error(y_test, y_pred)
string_score += f'\nMAE on testing set: {mae:.2f} $/hour'
fig, ax = plt.subplots(figsize=(6, 6))
plt.scatter(y_test, y_pred)
ax.plot([0, 1], [0, 1], transform=ax.transAxes, ls="--", c="red")
plt.text(3, 20, string_score)
plt.title('Lasso model, regularization, normalized variables')
plt.ylabel('Model predictions')
plt.xlabel('Truths')
plt.xlim([0, 27])
_ = plt.ylim([0, 27])
# %%
# For our dataset, again the model is not very predictive.
coefs = pd.DataFrame(
model.named_steps['transformedtargetregressor'].regressor_.coef_,
columns=['Coefficients'], index=feature_names
)
coefs.plot(kind='barh', figsize=(9, 7))
plt.title('Lasso model, regularization, normalized variables')
plt.axvline(x=0, color='.5')
plt.subplots_adjust(left=.3)
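# %%
# As an illustrative addition, we can list the features whose coefficients
# were set exactly to zero by the Lasso penalty (if any).
coefs[coefs["Coefficients"] == 0]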
# %%
# A Lasso model identifies the correlation between
# AGE and EXPERIENCE and suppresses one of them for the sake of the prediction.
#
# It is important to keep in mind that the coefficients that have been
# dropped may still be related to the outcome by themselves: the model
# chose to suppress them because they bring little or no additional
# information on top of the other features. Additionally, this selection
# is unstable for correlated features, and should be interpreted with
# caution.
#
# Lessons learned
# ---------------
#
# * Coefficients must be scaled to the same unit of measure to retrieve
# feature importance. Scaling them with the standard-deviation of the
# feature is a useful proxy.
# * Coefficients in multivariate linear models represent the dependency
# between a given feature and the target, **conditional** on the other
# features.
# * Correlated features induce instabilities in the coefficients of linear
# models and their effects cannot be well teased apart.
# * Different linear models respond differently to feature correlation and
# coefficients could significantly vary from one another.
# * Inspecting coefficients across the folds of a cross-validation loop
# gives an idea of their stability.
| bsd-3-clause |
cainiaocome/scikit-learn | sklearn/metrics/ranking.py | 75 | 25426 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
fpr, tpr, tresholds = roc_curve(y_true, y_score,
sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds := len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (np.all(classes == [0, 1]) or
np.all(classes == [-1, 1]) or
np.all(classes == [0]) or
np.all(classes == [-1]) or
np.all(classes == [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
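# Illustrative note (not part of the original module): for
# y_true = [0, 0, 1, 1] and y_score = [0.1, 0.4, 0.35, 0.8], the helper above
# returns fps = [0, 1, 1, 2], tps = [1, 1, 2, 2] and
# thresholds = [0.8, 0.4, 0.35, 0.1], i.e. cumulative false and true positive
# counts at each distinct score threshold, in decreasing score order.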
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
x axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds := len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
    # Handle badly formatted array and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_scores`` are broken by giving maximal rank that would have
been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
# if the scores are ordered, it's possible to count the number of
# incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
# When there are no positive or no negative labels, those values should
# be considered correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
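# --- Hedged usage sketch (editor addition, not part of the original module) ---
# Toy call to label_ranking_loss above. Sample 1 has one mis-ordered
# (relevant, irrelevant) pair out of two; sample 2 has both of its pairs
# mis-ordered, giving (0.5 + 1.0) / 2 = 0.75.
def _example_label_ranking_loss():
    y_true = np.array([[1, 0, 0], [0, 0, 1]])
    y_score = np.array([[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]])
    return label_ranking_loss(y_true, y_score)   # 0.75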
| bsd-3-clause |
mengyun1993/RNN-binary | history code/rnn_minibatch.py | 8 | 39709 | """ Vanilla RNN
Parallelizes scan over sequences by using mini-batches.
@author Graham Taylor
"""
import numpy as np
import theano
import theano.tensor as T
from sklearn.base import BaseEstimator
import logging
import time
import os
import datetime
import cPickle as pickle
logger = logging.getLogger(__name__)
import matplotlib.pyplot as plt
plt.ion()
mode = theano.Mode(linker='cvm')
#mode = 'DEBUG_MODE'
class RNN(object):
""" Recurrent neural network class
Supported output types:
real : linear output units, use mean-squared error
binary : binary output units, use cross-entropy error
softmax : single softmax out, use cross-entropy error
"""
def __init__(self, input, n_in, n_hidden, n_out, activation=T.tanh,
output_type='real'):
self.input = input
self.activation = activation
self.output_type = output_type
self.batch_size = T.iscalar()
# theta is a vector of all trainable parameters
# it represents the value of W, W_in, W_out, h0, bh, by
theta_shape = n_hidden ** 2 + n_in * n_hidden + n_hidden * n_out + \
n_hidden + n_hidden + n_out
self.theta = theano.shared(value=np.zeros(theta_shape,
dtype=theano.config.floatX))
# Parameters are reshaped views of theta
param_idx = 0 # pointer to somewhere along parameter vector
# recurrent weights as a shared variable
self.W = self.theta[param_idx:(param_idx + n_hidden ** 2)].reshape(
(n_hidden, n_hidden))
self.W.name = 'W'
W_init = np.asarray(np.random.uniform(size=(n_hidden, n_hidden),
low=-0.01, high=0.01),
dtype=theano.config.floatX)
param_idx += n_hidden ** 2
# input to hidden layer weights
self.W_in = self.theta[param_idx:(param_idx + n_in * \
n_hidden)].reshape((n_in, n_hidden))
self.W_in.name = 'W_in'
W_in_init = np.asarray(np.random.uniform(size=(n_in, n_hidden),
low=-0.01, high=0.01),
dtype=theano.config.floatX)
param_idx += n_in * n_hidden
# hidden to output layer weights
self.W_out = self.theta[param_idx:(param_idx + n_hidden * \
n_out)].reshape((n_hidden, n_out))
self.W_out.name = 'W_out'
W_out_init = np.asarray(np.random.uniform(size=(n_hidden, n_out),
low=-0.01, high=0.01),
dtype=theano.config.floatX)
param_idx += n_hidden * n_out
self.h0 = self.theta[param_idx:(param_idx + n_hidden)]
self.h0.name = 'h0'
h0_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
param_idx += n_hidden
self.bh = self.theta[param_idx:(param_idx + n_hidden)]
self.bh.name = 'bh'
bh_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
param_idx += n_hidden
self.by = self.theta[param_idx:(param_idx + n_out)]
self.by.name = 'by'
by_init = np.zeros((n_out,), dtype=theano.config.floatX)
param_idx += n_out
assert(param_idx == theta_shape)
# for convenience
self.params = [self.W, self.W_in, self.W_out, self.h0, self.bh,
self.by]
# shortcut to norms (for monitoring)
self.l2_norms = {}
for param in self.params:
self.l2_norms[param] = T.sqrt(T.sum(param ** 2))
# initialize parameters
# DEBUG_MODE gives division by zero error when we leave parameters
# as zeros
self.theta.set_value(np.concatenate([x.ravel() for x in
(W_init, W_in_init, W_out_init, h0_init, bh_init, by_init)]))
self.theta_update = theano.shared(
value=np.zeros(theta_shape, dtype=theano.config.floatX))
# recurrent function (using tanh activation function) and arbitrary output
# activation function
def step(x_t, h_tm1):
h_t = self.activation(T.dot(x_t, self.W_in) + \
T.dot(h_tm1, self.W) + self.bh)
y_t = T.dot(h_t, self.W_out) + self.by
return h_t, y_t
# the hidden state `h` for the entire sequence, and the output for the
# entire sequence `y` (first dimension is always time)
# Note the implementation of weight-sharing h0 across variable-size
# batches: the commented-out alternative multiplies h0 by T.ones, while
# the T.alloc approach used here is more robust
[self.h, self.y_pred], _ = theano.scan(step,
sequences=self.input,
outputs_info=[T.alloc(self.h0, self.input.shape[1],
n_hidden), None])
# outputs_info=[T.ones(shape=(self.input.shape[1],
# self.h0.shape[0])) * self.h0, None])
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = 0
self.L1 += abs(self.W.sum())
self.L1 += abs(self.W_in.sum())
self.L1 += abs(self.W_out.sum())
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = 0
self.L2_sqr += (self.W ** 2).sum()
self.L2_sqr += (self.W_in ** 2).sum()
self.L2_sqr += (self.W_out ** 2).sum()
if self.output_type == 'real':
self.loss = lambda y: self.mse(y)
elif self.output_type == 'binary':
# push through sigmoid
self.p_y_given_x = T.nnet.sigmoid(self.y_pred) # apply sigmoid
self.y_out = T.round(self.p_y_given_x) # round to {0,1}
self.loss = lambda y: self.nll_binary(y)
elif self.output_type == 'softmax':
# push through softmax, computing vector of class-membership
# probabilities in symbolic form
#
# T.nnet.softmax will not operate on T.tensor3 types, only matrices
# We take our n_steps x n_seq x n_classes output from the net
# and reshape it into a (n_steps * n_seq) x n_classes matrix
# apply softmax, then reshape back
y_p = self.y_pred
y_p_m = T.reshape(y_p, (y_p.shape[0] * y_p.shape[1], -1))
y_p_s = T.nnet.softmax(y_p_m)
self.p_y_given_x = T.reshape(y_p_s, y_p.shape)
# compute prediction as class whose probability is maximal
self.y_out = T.argmax(self.p_y_given_x, axis=-1)
self.loss = lambda y: self.nll_multiclass(y)
else:
raise NotImplementedError
def mse(self, y):
# error between output and target
return T.mean((self.y_pred - y) ** 2)
def nll_binary(self, y):
# negative log likelihood based on binary cross entropy error
return T.mean(T.nnet.binary_crossentropy(self.p_y_given_x, y))
def nll_multiclass(self, y):
# negative log likelihood based on multiclass cross entropy error
#
# Theano's advanced indexing is limited
# therefore we reshape our n_steps x n_seq x n_classes tensor3 of probs
# to a (n_steps * n_seq) x n_classes matrix of probs
# so that we can use advanced indexing (i.e. get the probs which
# correspond to the true class)
# the labels y also must be flattened when we do this to use the
# advanced indexing
p_y = self.p_y_given_x
p_y_m = T.reshape(p_y, (p_y.shape[0] * p_y.shape[1], -1))
y_f = y.flatten(ndim=1)
return -T.mean(T.log(p_y_m)[T.arange(p_y_m.shape[0]), y_f])
def errors(self, y):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_out.ndim:
raise TypeError('y should have the same shape as self.y_out',
('y', y.type, 'y_out', self.y_out.type))
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_out, y))
else:
raise NotImplementedError()
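# --- Hedged usage sketch (editor addition, not part of the original module) ---
# Wires the symbolic RNN above to a compiled prediction function. The input
# shape follows the scan convention used in __init__: (n_steps, n_seq, n_in).
def _example_build_rnn(n_in=5, n_hidden=10, n_out=3):
    x = T.tensor3('x')
    rnn = RNN(input=x, n_in=n_in, n_hidden=n_hidden, n_out=n_out,
              output_type='real')
    predict = theano.function([x], rnn.y_pred, mode=mode)
    seq = np.random.randn(10, 1, n_in).astype(theano.config.floatX)
    return predict(seq)   # shape (n_steps, n_seq, n_out)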
class MetaRNN(BaseEstimator):
def __init__(self, n_in=5, n_hidden=50, n_out=5, learning_rate=0.01,
n_epochs=100, batch_size=100, L1_reg=0.00, L2_reg=0.00,
learning_rate_decay=1,
activation='tanh', output_type='real', final_momentum=0.9,
initial_momentum=0.5, momentum_switchover=5,
snapshot_every=None, snapshot_path='/tmp'):
self.n_in = int(n_in)
self.n_hidden = int(n_hidden)
self.n_out = int(n_out)
self.learning_rate = float(learning_rate)
self.learning_rate_decay = float(learning_rate_decay)
self.n_epochs = int(n_epochs)
self.batch_size = int(batch_size)
self.L1_reg = float(L1_reg)
self.L2_reg = float(L2_reg)
self.activation = activation
self.output_type = output_type
self.initial_momentum = float(initial_momentum)
self.final_momentum = float(final_momentum)
self.momentum_switchover = int(momentum_switchover)
if snapshot_every is not None:
self.snapshot_every = int(snapshot_every)
else:
self.snapshot_every = None
self.snapshot_path = snapshot_path
self.ready()
def ready(self):
# input (where first dimension is time)
self.x = T.tensor3(name='x')
# target (where first dimension is time)
if self.output_type == 'real':
self.y = T.tensor3(name='y', dtype=theano.config.floatX)
elif self.output_type == 'binary':
self.y = T.tensor3(name='y', dtype='int32')
elif self.output_type == 'softmax': # now it is a matrix (T x n_seq)
self.y = T.matrix(name='y', dtype='int32')
else:
raise NotImplementedError
# learning rate
self.lr = T.scalar()
if self.activation == 'tanh':
activation = T.tanh
elif self.activation == 'sigmoid':
activation = T.nnet.sigmoid
elif self.activation == 'relu':
activation = lambda x: x * (x > 0)
elif self.activation == 'cappedrelu':
activation = lambda x: T.minimum(x * (x > 0), 6)
else:
raise NotImplementedError
self.rnn = RNN(input=self.x, n_in=self.n_in,
n_hidden=self.n_hidden, n_out=self.n_out,
activation=activation, output_type=self.output_type)
if self.output_type == 'real':
self.predict = theano.function(inputs=[self.x, ],
outputs=self.rnn.y_pred,
mode=mode)
elif self.output_type == 'binary':
self.predict_proba = theano.function(inputs=[self.x, ],
outputs=self.rnn.p_y_given_x, mode=mode)
self.predict = theano.function(inputs=[self.x, ],
outputs=T.round(self.rnn.p_y_given_x),
mode=mode)
elif self.output_type == 'softmax':
self.predict_proba = theano.function(inputs=[self.x, ],
outputs=self.rnn.p_y_given_x, mode=mode)
self.predict = theano.function(inputs=[self.x, ],
outputs=self.rnn.y_out, mode=mode)
else:
raise NotImplementedError
def shared_dataset(self, data_xy, borrow=True):
""" Load the dataset into shared variables """
data_x, data_y = data_xy
shared_x = theano.shared(np.asarray(data_x,
dtype=theano.config.floatX),
borrow=True)
shared_y = theano.shared(np.asarray(data_y,
dtype=theano.config.floatX),
borrow=True)
if self.output_type in ('binary', 'softmax'):
return shared_x, T.cast(shared_y, 'int32')
else:
return shared_x, shared_y
def __getstate__(self):
""" Return state sequence."""
params = self._get_params() # parameters set in constructor
theta = self.rnn.theta.get_value()
state = (params, theta)
return state
def _set_weights(self, theta):
""" Set fittable parameters from weights sequence.
"""
self.rnn.theta.set_value(theta)
def __setstate__(self, state):
""" Set parameters from state sequence.
"""
params, theta = state
self.set_params(**params)
self.ready()
self._set_weights(theta)
def save(self, fpath='.', fname=None):
""" Save a pickled representation of Model state. """
fpathstart, fpathext = os.path.splitext(fpath)
if fpathext == '.pkl':
# User supplied an absolute path to a pickle file
fpath, fname = os.path.split(fpath)
elif fname is None:
# Generate filename based on date
date_obj = datetime.datetime.now()
date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')
class_name = self.__class__.__name__
fname = '%s.%s.pkl' % (class_name, date_str)
fabspath = os.path.join(fpath, fname)
logger.info("Saving to %s ..." % fabspath)
file = open(fabspath, 'wb')
state = self.__getstate__()
pickle.dump(state, file, protocol=pickle.HIGHEST_PROTOCOL)
file.close()
def load(self, path):
""" Load model parameters from path. """
logger.info("Loading from %s ..." % path)
file = open(path, 'rb')
state = pickle.load(file)
self.__setstate__(state)
file.close()
def optional_output(self, train_set_x, show_norms=True, show_output=True):
""" Produces some debugging output. """
if show_norms:
norm_output = []
for param in self.rnn.params:
norm_output.append('%s: %6.4f' % (param.name,
self.get_norms[param]()))
logger.info("norms: {" + ', '.join(norm_output) + "}")
if show_output:
# show output for a single case
if self.output_type == 'binary':
output_fn = self.predict_proba
else:
output_fn = self.predict
logger.info("sample output: " + \
str(output_fn(train_set_x.get_value(
borrow=True)[:, 0, :][:, np.newaxis, :]).flatten()))
def fit(self, X_train, Y_train, X_test=None, Y_test=None,
validate_every=100, optimizer='sgd', compute_zero_one=False,
show_norms=True, show_output=True):
""" Fit model
Pass in X_test, Y_test to compute test error and report during
training.
X_train : ndarray (n_steps x n_seq x n_in)
Y_train : ndarray (n_steps x n_seq x n_out), or (n_steps x n_seq) for
softmax output
validate_every : int
in terms of number of epochs
optimizer : string
Optimizer type.
Possible values:
'sgd' : batch stochastic gradient descent
'cg' : nonlinear conjugate gradient algorithm
(scipy.optimize.fmin_cg)
'bfgs' : quasi-Newton method of Broyden, Fletcher, Goldfarb,
and Shanno (scipy.optimize.fmin_bfgs)
'l_bfgs_b' : Limited-memory BFGS (scipy.optimize.fmin_l_bfgs_b)
compute_zero_one : bool
in the case of binary output, compute zero-one error in addition to
cross-entropy error
show_norms : bool
Show L2 norms of individual parameter groups while training.
show_output : bool
Show the model output on first training case while training.
"""
if X_test is not None:
assert(Y_test is not None)
self.interactive = True
test_set_x, test_set_y = self.shared_dataset((X_test, Y_test))
else:
self.interactive = False
train_set_x, train_set_y = self.shared_dataset((X_train, Y_train))
if compute_zero_one:
assert(self.output_type == 'binary' \
or self.output_type == 'softmax')
# compute number of minibatches for training
# note that cases are the second dimension, not the first
n_train = train_set_x.get_value(borrow=True).shape[1]
n_train_batches = int(np.ceil(1.0 * n_train / self.batch_size))
if self.interactive:
n_test = test_set_x.get_value(borrow=True).shape[1]
n_test_batches = int(np.ceil(1.0 * n_test / self.batch_size))
#validate_every is specified in terms of epochs
validation_frequency = validate_every * n_train_batches
######################
# BUILD ACTUAL MODEL #
######################
logger.info('... building the model')
index = T.lscalar('index') # index to a [mini]batch
n_ex = T.lscalar('n_ex') # total number of examples
# learning rate (may change)
l_r = T.scalar('l_r', dtype=theano.config.floatX)
mom = T.scalar('mom', dtype=theano.config.floatX) # momentum
cost = self.rnn.loss(self.y) \
+ self.L1_reg * self.rnn.L1 \
+ self.L2_reg * self.rnn.L2_sqr
# Proper implementation of variable-batch size evaluation
# Note that classifier.errors() returns the mean error
# But the last batch may be a smaller size
# So we keep around the effective_batch_size (whose last element may
# be smaller than the rest)
# And weight the reported error by the batch_size when we average
# Also, by keeping batch_start and batch_stop as symbolic variables,
# we make the theano function easier to read
batch_start = index * self.batch_size
batch_stop = T.minimum(n_ex, (index + 1) * self.batch_size)
effective_batch_size = batch_stop - batch_start
get_batch_size = theano.function(inputs=[index, n_ex],
outputs=effective_batch_size)
compute_train_error = theano.function(inputs=[index, n_ex],
outputs=self.rnn.loss(self.y),
givens={self.x: train_set_x[:, batch_start:batch_stop],
self.y: train_set_y[:, batch_start:batch_stop]},
mode=mode)
if compute_zero_one:
compute_train_zo = theano.function(inputs=[index, n_ex],
outputs=self.rnn.errors(self.y),
givens={self.x: train_set_x[:, batch_start:batch_stop],
self.y: train_set_y[:, batch_start:batch_stop]},
mode=mode)
if self.interactive:
compute_test_error = theano.function(inputs=[index, n_ex],
outputs=self.rnn.loss(self.y),
givens={self.x: test_set_x[:, batch_start:batch_stop],
self.y: test_set_y[:, batch_start:batch_stop]},
mode=mode)
if compute_zero_one:
compute_test_zo = theano.function(inputs=[index, n_ex],
outputs=self.rnn.errors(self.y),
givens={self.x: test_set_x[:, batch_start:batch_stop],
self.y: test_set_y[:, batch_start:batch_stop]},
mode=mode)
self.get_norms = {}
for param in self.rnn.params:
self.get_norms[param] = theano.function(inputs=[],
outputs=self.rnn.l2_norms[param], mode=mode)
# compute the gradient of cost with respect to theta using BPTT
gtheta = T.grad(cost, self.rnn.theta)
if optimizer == 'sgd':
updates = {}
theta = self.rnn.theta
theta_update = self.rnn.theta_update
# careful here: an update to a shared variable
# cannot depend on another shared variable's updated value,
# since updates happen in parallel,
# so we need to be explicit
upd = mom * theta_update - l_r * gtheta
updates[theta_update] = upd
updates[theta] = theta + upd
# compile a Theano function `train_model` that returns the
# cost and, at the same time, updates the parameters of the
# model based on the rules defined in `updates`
train_model = theano.function(inputs=[index, n_ex, l_r, mom],
outputs=cost,
updates=updates,
givens={self.x: train_set_x[:, batch_start:batch_stop],
self.y: train_set_y[:, batch_start:batch_stop]},
mode=mode)
###############
# TRAIN MODEL #
###############
logger.info('... training')
epoch = 0
while (epoch < self.n_epochs):
epoch = epoch + 1
effective_momentum = self.final_momentum \
if epoch > self.momentum_switchover \
else self.initial_momentum
for minibatch_idx in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_idx, n_train,
self.learning_rate,
effective_momentum)
# iteration number (how many weight updates have we made?)
# epoch is 1-based, index is 0 based
iter = (epoch - 1) * n_train_batches + minibatch_idx + 1
if iter % validation_frequency == 0:
# compute loss on training set
train_losses = [compute_train_error(i, n_train)
for i in xrange(n_train_batches)]
train_batch_sizes = [get_batch_size(i, n_train)
for i in xrange(n_train_batches)]
this_train_loss = np.average(train_losses,
weights=train_batch_sizes)
if compute_zero_one:
train_zero_one = [compute_train_zo(i, n_train)
for i in xrange(n_train_batches)]
this_train_zero_one = np.average(train_zero_one,
weights=train_batch_sizes)
if self.interactive:
test_losses = [compute_test_error(i, n_test)
for i in xrange(n_test_batches)]
test_batch_sizes = [get_batch_size(i, n_test)
for i in xrange(n_test_batches)]
this_test_loss = np.average(test_losses,
weights=test_batch_sizes)
if compute_zero_one:
test_zero_one = [compute_test_zo(i, n_test)
for i in xrange(n_test_batches)]
this_test_zero_one = np.average(test_zero_one,
weights=test_batch_sizes)
if compute_zero_one:
logger.info('epoch %i, mb %i/%i, tr loss %f, '
'tr zo %f, te loss %f '
'te zo %f lr: %f' % \
(epoch, minibatch_idx + 1,
n_train_batches,
this_train_loss, this_train_zero_one,
this_test_loss, this_test_zero_one,
self.learning_rate))
else:
logger.info('epoch %i, mb %i/%i, tr loss %f '
'te loss %f lr: %f' % \
(epoch, minibatch_idx + 1, n_train_batches,
this_train_loss, this_test_loss,
self.learning_rate))
else:
if compute_zero_one:
logger.info('epoch %i, mb %i/%i, train loss %f'
' train zo %f '
'lr: %f' % (epoch,
minibatch_idx + 1,
n_train_batches,
this_train_loss,
this_train_zero_one,
self.learning_rate))
else:
logger.info('epoch %i, mb %i/%i, train loss %f'
' lr: %f' % (epoch,
minibatch_idx + 1,
n_train_batches,
this_train_loss,
self.learning_rate))
self.optional_output(train_set_x, show_norms,
show_output)
self.learning_rate *= self.learning_rate_decay
if self.snapshot_every is not None:
if (epoch + 1) % self.snapshot_every == 0:
date_obj = datetime.datetime.now()
date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')
class_name = self.__class__.__name__
fname = '%s.%s-snapshot-%d.pkl' % (class_name,
date_str, epoch + 1)
fabspath = os.path.join(self.snapshot_path, fname)
self.save(fpath=fabspath)
elif optimizer == 'cg' or optimizer == 'bfgs' \
or optimizer == 'l_bfgs_b':
# compile a theano function that returns the cost of a minibatch
batch_cost = theano.function(inputs=[index, n_ex],
outputs=cost,
givens={self.x: train_set_x[:, batch_start:batch_stop],
self.y: train_set_y[:, batch_start:batch_stop]},
mode=mode, name="batch_cost")
# compile a theano function that returns the gradient of the
# minibatch with respect to theta
batch_grad = theano.function(inputs=[index, n_ex],
outputs=T.grad(cost, self.rnn.theta),
givens={self.x: train_set_x[:, batch_start:batch_stop],
self.y: train_set_y[:, batch_start:batch_stop]},
mode=mode, name="batch_grad")
# creates a function that computes the average cost on the training
# set
def train_fn(theta_value):
self.rnn.theta.set_value(theta_value, borrow=True)
train_losses = [batch_cost(i, n_train)
for i in xrange(n_train_batches)]
train_batch_sizes = [get_batch_size(i, n_train)
for i in xrange(n_train_batches)]
return np.average(train_losses, weights=train_batch_sizes)
# creates a function that computes the average gradient of cost
# with respect to theta
def train_fn_grad(theta_value):
self.rnn.theta.set_value(theta_value, borrow=True)
train_grads = [batch_grad(i, n_train)
for i in xrange(n_train_batches)]
train_batch_sizes = [get_batch_size(i, n_train)
for i in xrange(n_train_batches)]
return np.average(train_grads, weights=train_batch_sizes,
axis=0)
# validation function, prints useful output after each iteration
def callback(theta_value):
self.epoch += 1
if (self.epoch) % validate_every == 0:
self.rnn.theta.set_value(theta_value, borrow=True)
# compute loss on training set
train_losses = [compute_train_error(i, n_train)
for i in xrange(n_train_batches)]
train_batch_sizes = [get_batch_size(i, n_train)
for i in xrange(n_train_batches)]
this_train_loss = np.average(train_losses,
weights=train_batch_sizes)
if compute_zero_one:
train_zero_one = [compute_train_zo(i, n_train)
for i in xrange(n_train_batches)]
this_train_zero_one = np.average(train_zero_one,
weights=train_batch_sizes)
if self.interactive:
test_losses = [compute_test_error(i, n_test)
for i in xrange(n_test_batches)]
test_batch_sizes = [get_batch_size(i, n_test)
for i in xrange(n_test_batches)]
this_test_loss = np.average(test_losses,
weights=test_batch_sizes)
if compute_zero_one:
test_zero_one = [compute_test_zo(i, n_test)
for i in xrange(n_test_batches)]
this_test_zero_one = np.average(test_zero_one,
weights=test_batch_sizes)
if compute_zero_one:
logger.info('epoch %i, tr loss %f, '
'tr zo %f, te loss %f '
'te zo %f' % \
(self.epoch, this_train_loss,
this_train_zero_one, this_test_loss,
this_test_zero_one))
else:
logger.info('epoch %i, tr loss %f, te loss %f' % \
(self.epoch, this_train_loss,
this_test_loss))
else:
if compute_zero_one:
logger.info('epoch %i, train loss %f'
', train zo %f ' % \
(self.epoch, this_train_loss,
this_train_zero_one))
else:
logger.info('epoch %i, train loss %f ' % \
(self.epoch, this_train_loss))
self.optional_output(train_set_x, show_norms, show_output)
###############
# TRAIN MODEL #
###############
logger.info('... training')
# using scipy conjugate gradient optimizer
import scipy.optimize
if optimizer == 'cg':
of = scipy.optimize.fmin_cg
elif optimizer == 'bfgs':
of = scipy.optimize.fmin_bfgs
elif optimizer == 'l_bfgs_b':
of = scipy.optimize.fmin_l_bfgs_b
logger.info("Optimizing using %s..." % of.__name__)
start_time = time.clock()
# keep track of epochs externally
# these get updated through callback
self.epoch = 0
# the interface to l_bfgs_b is different from that of cg and bfgs;
# however, as of scipy 0.11 these are unified under
# scipy.optimize.minimize
if optimizer == 'cg' or optimizer == 'bfgs':
best_theta = of(
f=train_fn,
x0=self.rnn.theta.get_value(),
# x0=np.zeros(self.rnn.theta.get_value().shape,
# dtype=theano.config.floatX),
fprime=train_fn_grad,
callback=callback,
disp=1,
retall=1,
maxiter=self.n_epochs)
elif optimizer == 'l_bfgs_b':
best_theta, f_best_theta, info = of(
func=train_fn,
x0=self.rnn.theta.get_value(),
fprime=train_fn_grad,
iprint=validate_every,
maxfun=self.n_epochs) # max number of feval
end_time = time.clock()
print "Optimization time: %f" % (end_time - start_time)
else:
raise NotImplementedError
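# --- Hedged illustration (editor addition, not part of the original module) ---
# Plain-numpy restatement of the classical-momentum update used in the 'sgd'
# branch of fit() above: upd = mom * prev_update - lr * grad; theta += upd.
# The quadratic objective below is only an illustrative stand-in.
def _example_momentum_sgd(lr=0.1, mom=0.9, n_steps=50):
    theta = np.array([5.0, -3.0])          # parameter vector
    update = np.zeros_like(theta)          # analogous to theta_update
    for _ in range(n_steps):
        grad = 2.0 * theta                 # gradient of f(theta) = ||theta||^2
        update = mom * update - lr * grad  # same rule as the Theano updates dict
        theta = theta + update
    return theta                           # approaches the minimum at the origin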
def test_real(n_epochs=1000):
""" Test RNN with real-valued outputs. """
n_hidden = 10
n_in = 5
n_out = 3
n_steps = 10
n_seq = 10 # per batch
n_batches = 10
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_steps, n_seq * n_batches, n_in)
targets = np.zeros((n_steps, n_seq * n_batches, n_out))
targets[1:, :, 0] = seq[:-1, :, 3] # delayed 1
targets[1:, :, 1] = seq[:-1, :, 2] # delayed 1
targets[2:, :, 2] = seq[:-2, :, 0] # delayed 2
targets += 0.01 * np.random.standard_normal(targets.shape)
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
learning_rate=0.01, learning_rate_decay=0.999,
n_epochs=n_epochs, batch_size=n_seq, activation='tanh',
L2_reg=1e-3)
model.fit(seq, targets, validate_every=100, optimizer='bfgs')
plt.close('all')
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[:, 0, :])
ax1.set_title('input')
ax2 = plt.subplot(212)
true_targets = plt.plot(targets[:, 0, :])
guess = model.predict(seq[:, 0, :][:, np.newaxis, :])
guessed_targets = plt.plot(guess.squeeze(), linestyle='--')
for i, x in enumerate(guessed_targets):
x.set_color(true_targets[i].get_color())
ax2.set_title('solid: true output, dashed: model output')
def test_binary(multiple_out=False, n_epochs=1000, optimizer='cg'):
""" Test RNN with binary outputs. """
n_hidden = 10
n_in = 5
if multiple_out:
n_out = 2
else:
n_out = 1
n_steps = 10
n_seq = 10 # per batch
n_batches = 50
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_steps, n_seq * n_batches, n_in)
targets = np.zeros((n_steps, n_seq * n_batches, n_out))
# whether lag 1 (dim 3) is greater than lag 2 (dim 0)
targets[2:, :, 0] = np.cast[np.int](seq[1:-1, :, 3] > seq[:-2, :, 0])
if multiple_out:
# whether product of lag 1 (dim 4) and lag 1 (dim 2)
# is less than lag 2 (dim 0)
targets[2:, :, 1] = np.cast[np.int](
(seq[1:-1, :, 4] * seq[1:-1, :, 2]) > seq[:-2, :, 0])
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
learning_rate=0.005, learning_rate_decay=0.999,
n_epochs=n_epochs, batch_size=n_seq, activation='tanh',
output_type='binary')
model.fit(seq, targets, validate_every=100, compute_zero_one=True,
optimizer=optimizer)
seqs = xrange(10)
plt.close('all')
for seq_num in seqs:
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[:, seq_num, :])
ax1.set_title('input')
ax2 = plt.subplot(212)
true_targets = plt.step(xrange(n_steps), targets[:, seq_num, :],
marker='o')
guess = model.predict_proba(seq[:, seq_num, :][:, np.newaxis, :])
guessed_targets = plt.step(xrange(n_steps), guess.squeeze())
plt.setp(guessed_targets, linestyle='--', marker='d')
for i, x in enumerate(guessed_targets):
x.set_color(true_targets[i].get_color())
ax2.set_ylim((-0.1, 1.1))
ax2.set_title('solid: true output, dashed: model output (prob)')
def test_softmax(n_epochs=250, optimizer='cg'):
""" Test RNN with softmax outputs. """
n_hidden = 10
n_in = 5
n_steps = 10
n_seq = 10 # per batch
n_batches = 50
n_classes = 3
n_out = n_classes # restricted to single softmax per time step
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_steps, n_seq * n_batches, n_in)
targets = np.zeros((n_steps, n_seq * n_batches), dtype=np.int)
thresh = 0.5
# if lag 1 (dim 3) is greater than lag 2 (dim 0) + thresh
# class 1
# if lag 1 (dim 3) is less than lag 2 (dim 0) - thresh
# class 2
# if lag 2(dim0) - thresh <= lag 1 (dim 3) <= lag2(dim0) + thresh
# class 0
targets[2:, :][seq[1:-1, :, 3] > seq[:-2, :, 0] + thresh] = 1
targets[2:, :][seq[1:-1, :, 3] < seq[:-2, :, 0] - thresh] = 2
#targets[:, 2:, 0] = np.cast[np.int](seq[:, 1:-1, 3] > seq[:, :-2, 0])
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
learning_rate=0.005, learning_rate_decay=0.999,
n_epochs=n_epochs, batch_size=n_seq, activation='tanh',
output_type='softmax')
model.fit(seq, targets, validate_every=10, compute_zero_one=True,
optimizer=optimizer)
seqs = xrange(10)
plt.close('all')
for seq_num in seqs:
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[:, seq_num])
ax1.set_title('input')
ax2 = plt.subplot(212)
# blue line will represent true classes
true_targets = plt.step(xrange(n_steps), targets[:, seq_num],
marker='o')
# show probabilities (in b/w) output by model
guess = model.predict_proba(seq[:, seq_num][:, np.newaxis])
guessed_probs = plt.imshow(guess.squeeze().T, interpolation='nearest',
cmap='gray')
ax2.set_title('blue: true class, grayscale: probs assigned by model')
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
t0 = time.time()
test_real(n_epochs=1000)
#test_binary(optimizer='sgd', n_epochs=1000)
#test_softmax(n_epochs=250, optimizer='sgd')
print "Elapsed time: %f" % (time.time() - t0)
| bsd-3-clause |
UBOdin/jitd-synthesis | treetoaster_scripts/generate_supplements_3_4_5.py | 1 | 10049 |
import gzip
import json
import sys
import os
import io
import copy
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.patches as mpatches
import math
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
runcount = 10
#xdim = 22
#ydim = 12
#xdim = 10
#ydim = 6
savepdf = True
setbox = True
n_naive = "Naive"
n_set = "Index"
n_classic = "Classic"
n_dbt = "DBT"
n_tt = "TT"
def process_loglines(input_file_name, results_list_list_list):
# input_file_name = ""
# results_list_list_list = []
# type_dict = {}
input_file_obj = "" # file obj
iteration = -1
logline = ""
logline_list = []
latency = 0
latency_total = 0
optype = 0
vmsize = 0
latency_list = results_list_list_list[0]
operation_list_list = results_list_list_list[1]
vmsize_list = results_list_list_list[2]
input_file_obj = open(input_file_name, "r")
# Skip first line (schema info):
logline = input_file_obj.readline() #.decode("ascii")
while (True):
iteration += 1
# Keep reading until finished:
try:
logline = input_file_obj.readline() #.decode("ascii")
except:
print("File read error")
sys.exit(1)
#end_try
if (logline == ""):
break
#end_if
logline_list = logline.split("\t")
if (len(logline_list) != 29):
print("Unexpected length")
sys.exit(1)
#end_if
latency = int(logline_list[1])
latency_total += latency
#id_list.append(iteration)
#latency_list.append(latency)
optype = int(logline_list[2])
vmsize = int(logline_list[6]) # repurposed jitd depth field (kludge)
latency_list.append(latency)
operation_list_list[optype].append(latency) # per-operation type
vmsize_list.append(vmsize)
#end_while
return
#end_def
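# --- Hedged illustration (editor addition) ---
# Sketch of the trace-file row assumed by process_loglines above: 29
# tab-separated fields per line, of which only field 1 (latency), field 2
# (operation type) and field 6 (memory pages, the repurposed "jitd depth"
# slot) are read. The concrete values below are invented for illustration.
def _example_logline():
    fields = ["0"] * 29
    fields[1] = "1234"    # latency
    fields[2] = "3"       # operation type index (0-4)
    fields[6] = "40960"   # memory pages (VmSize proxy)
    return "\t".join(fields) + "\n"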
def get_memory_lists(workload):
# workload = ""
input_file_prefix = ""
input_file_name = ""
naive_results_list_list_list = [[], [], []]
set_results_list_list_list = [[], [], []]
jitd_results_list_list_list = [[], [], []]
toaster_results_list_list_list = [[], [], []]
classic_results_list_list_list = [[], [], []]
for i in range(5): # 5 DB operations
naive_results_list_list_list[1].append([])
set_results_list_list_list[1].append([])
jitd_results_list_list_list[1].append([])
toaster_results_list_list_list[1].append([])
classic_results_list_list_list[1].append([])
#end_if
for i in range(runcount):
#index_list.append(i + 1)
input_file_name = "tracefiles/naive_data_performance_" + workload + "_" + str(i) + ".txt"
process_loglines(input_file_name, naive_results_list_list_list)
input_file_name = "tracefiles/set_data_performance_" + workload + "_" + str(i) + ".txt"
process_loglines(input_file_name, set_results_list_list_list)
input_file_name = "tracefiles/view_data_performance_" + workload + "_" + str(i) + ".txt"
process_loglines(input_file_name, jitd_results_list_list_list)
input_file_name = "tracefiles/toaster_data_performance_" + workload + "_" + str(i) + ".txt"
process_loglines(input_file_name, toaster_results_list_list_list)
input_file_name = "tracefiles/classic_data_performance_" + workload + "_" + str(i) + ".txt"
process_loglines(input_file_name, classic_results_list_list_list)
#end_for
return naive_results_list_list_list, set_results_list_list_list, classic_results_list_list_list, toaster_results_list_list_list, jitd_results_list_list_list
#end_def
#def graph_boxplot(usenaive):
def graph_boxplot(whichloads):
# usenaive = False # (legacy flag) Whether to use naive search data in processing graphs
# whichloads = 0 # Which maintenance strategies to include in the crossplot: 2 = Naive, Classic, DBT and TT; 3 = Classic, DBT and TT; 4 = Classic and DBT only (Index excluded in all cases)
workload_list = ["a", "b", "c", "d", "f"]
naive_uber_list = []
set_uber_list = []
classic_uber_list = []
toaster_uber_list = []
jitd_uber_list = []
summary_list = []
summary_list.append([])
for workload in workload_list:
print("Processing maintenance " + workload)
naive_uber_list, set_uber_list, classic_uber_list, toaster_uber_list, jitd_uber_list = get_memory_lists(workload)
'''
if (usenaive == True):
summary_list.append(naive_uber_list[2])
#end_if
'''
summary_list.append(naive_uber_list[2])
summary_list.append(set_uber_list[2])
summary_list.append(classic_uber_list[2])
summary_list.append(toaster_uber_list[2])
summary_list.append(jitd_uber_list[2])
summary_list.append([])
#end_for
fig2_list, ax2_list = plt.subplots()
bp_latency = ax2_list.boxplot(summary_list)
'''
#ax2_list.set_title("Average Process Memory Usage By Workload", fontsize = 14, fontweight = "bold")
ax2_list.set_xlabel("Maintenance type and workload", fontsize = 14, fontweight = "bold")
ax2_list.set_ylabel("Average memory\npages allocated", fontsize = 14, fontweight = "bold")
ax2_list.axis([1, len(workload_list) * 5 + 1, 0, 100000])
if (usenaive == True):
x_subname_list = [n_naive, n_set, n_classic, n_dbt, n_tt, ""]
else:
x_subname_list = [n_set, n_classic, n_dbt, n_tt, ""]
#end_if
x_labels = [""]
for i in range(5):
for e in x_subname_list:
x_labels.append(e)
#end_for
#end_for
x_labels[0] = "\n\n\n Workload A"
x_labels[5] = "\n\n\n Workload B"
x_labels[10] = "\n\n\n Workload C"
x_labels[15] = "\n\n\n Workload D"
x_labels[20] = "\n\n\n Workload F"
# N.b. No data/plots for insert_singleton or remove_singleton -- these are mutate only
ax2_list.set_xticklabels(x_labels)
tick_list = ax2_list.get_xticklabels()
for i in range(len(tick_list)):
if (i % 5 != 0):
tick_list[i].set_rotation(-45)
tick_list[i].set_ha("left")
#end_if
#end_for
# Do not include naive search in memory usage boxplot:
if (usenaive == False):
if (savepdf == True):
fig2_list.savefig("graphs/figure_13.pdf", bbox_inches = "tight");
else:
fig2_list.savefig("graphs/figure_13.png");
#endif
return # Done; do not process scatter crossplot with naive search data
#end_if
'''
line_list = []
median = 0
memory_list = []
view_cost_list = []
try:
boxplot_input_file_obj = open("graphdata_summary.txt")
except:
print("ERROR: Missing data file. Please run script to create graphs 9-10-12 first.")
sys.exit(1)
#end_try
input_line = ""
while (True):
input_line = boxplot_input_file_obj.readline()
if (input_line == ""):
break
#end_if
view_cost_list.append(float(input_line))
#end_while
line_list = bp_latency["medians"] # boxplot return value is a dictionary -- get medians
for line in line_list:
# line is a pyplot line2d object
# get_ydata() returns a 2-tuple of redundant values
# so, get_ydata()[0] returns a float (or a "nan")
median = line.get_ydata()[0]
if (math.isnan(median) == True):
memory_list.append(0.0)
else:
memory_list.append(float(median))
#end_if
#end_for
'''
print(view_cost_list)
print("")
print(memory_list)
'''
fig3, ax3 = plt.subplots()
hadj = .07
vadj = 0.1
fig3.subplots_adjust(left = 0.125 + hadj, right = 0.90 + hadj, top = 0.88 + vadj, bottom = 0.115 + vadj)
color_list = ["brown", "orange", "black", "blue", "red", "green"]
#color_list = ["brown", "black", "blue", "red", "green"]
color = ""
marker_list = ["o", "s", "<", ">", "P", "*"]
marker = ""
if (len(view_cost_list) != len(memory_list)):
print("Error: Mismatched lists")
exit(1)
#end_if
for e, f, g in zip(range(len(view_cost_list)), view_cost_list, memory_list):
color = color_list[e % 6]
marker = marker_list[int(e / 6)]
if (f == 0):
continue
#end_if
# Skip index:
if (e % 6 == 2):
continue
#end_if
# Skip naive unless whichloads == 2:
if ((e % 6 == 1) and (whichloads >= 3)):
continue
#end_if
# Skip TT when whichloads == 4:
if ((e % 6 == 5) and (whichloads == 4)):
continue
#end_if
ax3.scatter(f, g, s = 40, color = color, marker = marker)
#end_for
#ax3.set_title("Latency / Memory Crossplot", fontsize = 14, fontweight = "bold")
ax3.set_xlabel("Average total latency (search + maintenance)", fontsize = 14, fontweight = "bold")
ax3.set_ylabel("Average memory\npages allocated", fontsize = 14, fontweight = "bold")
ax3.axis([0, 15000, 0, 80000])
handle_list = []
if (whichloads == 2):
handle_list.append(Patch(color = "orange", label = "Naive"))
else:
handle_list.append(Patch(color = "white", label = ""))
#end_if
#handle_list.append(Patch(color = "black", label = "Index"))
handle_list.append(Patch(color = "white", label = ""))
handle_list.append(Patch(color = "blue", label = "Classic"))
handle_list.append(Patch(color = "red", label = "DBT"))
if (whichloads <= 3):
handle_list.append(Patch(color = "green", label = "TT"))
else:
handle_list.append(Patch(color = "white", label = ""))
#end_if
handle_list.append(Line2D([], [], marker = "o", color = "black", label = "Workload A", linewidth = 0))
handle_list.append(Line2D([], [], marker = "s", color = "black", label = "Workload B", linewidth = 0))
handle_list.append(Line2D([], [], marker = "<", color = "black", label = "Workload C", linewidth = 0))
handle_list.append(Line2D([], [], marker = ">", color = "black", label = "Workload D", linewidth = 0))
handle_list.append(Line2D([], [], marker = "P", color = "black", label = "Workload F", linewidth = 0))
ax3.legend(handles = handle_list, loc = "upper right", ncol = 2)
#fig3.savefig("graphs/figure_11.pdf")
if (whichloads == 2):
fig3.savefig("graphs/supplement_3_crossplot_naive_class_dbt_tt.pdf")
elif (whichloads == 3):
fig3.savefig("graphs/supplement_4_crossplot_class_dbt_tt.pdf")
elif (whichloads == 4):
fig3.savefig("graphs/supplement_5_crossplot_class_dbt.pdf")
#end_if
return
#end_def
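# --- Hedged mini-demo (editor addition) ---
# The median-extraction trick used in graph_boxplot above: pyplot's boxplot
# returns a dict whose "medians" entry holds Line2D objects, and get_ydata()
# yields the median value twice (once per end of the horizontal median line).
def _example_boxplot_medians():
    _, ax = plt.subplots()
    bp = ax.boxplot([[1, 2, 3, 4], [10, 20, 30]])
    return [line.get_ydata()[0] for line in bp["medians"]]   # [2.5, 20.0]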
def main():
print("Processing supplementary crossplots")
graph_boxplot(2)
graph_boxplot(3)
graph_boxplot(4)
print("Done")
sys.exit(0)
print("Processing scatter crossplot (Figure 12)")
graph_boxplot(True)
print("Processing memory boxplot graph (Figure 13)")
graph_boxplot(False)
print("Success. Graphs in ../graph directory")
#end_def
main()
| apache-2.0 |
pld/bamboo | bamboo/tests/models/test_dataset.py | 2 | 5129 | from datetime import datetime
from pandas import DataFrame
from bamboo.tests.test_base import TestBase
from bamboo.models.dataset import Dataset
from bamboo.models.observation import Observation
from bamboo.lib.datetools import recognize_dates
from bamboo.lib.mongo import MONGO_ID_ENCODED
from bamboo.lib.schema_builder import OLAP_TYPE, RE_ENCODED_COLUMN, SIMPLETYPE
class TestDataset(TestBase):
def test_save(self):
for dataset_name in self.TEST_DATASETS:
dataset = Dataset().save(self.test_dataset_ids[dataset_name])
record = dataset.record
self.assertTrue(isinstance(record, dict))
self.assertTrue('_id' in record.keys())
def test_find(self):
for dataset_name in self.TEST_DATASETS:
dataset = Dataset.create(self.test_dataset_ids[dataset_name])
record = dataset.record
rows = Dataset.find(self.test_dataset_ids[dataset_name])
self.assertEqual(record, rows[0].record)
def test_find_one(self):
for dataset_name in self.TEST_DATASETS:
dataset = Dataset.create(self.test_dataset_ids[dataset_name])
record = dataset.record
row = Dataset.find_one(self.test_dataset_ids[dataset_name])
self.assertEqual(record, row.record)
def test_create(self):
for dataset_name in self.TEST_DATASETS:
dataset = Dataset.create(self.test_dataset_ids[dataset_name])
self.assertTrue(isinstance(dataset, Dataset))
def test_delete(self):
for dataset_name in self.TEST_DATASETS:
dataset = Dataset.create(self.test_dataset_ids[dataset_name])
records = Dataset.find(self.test_dataset_ids[dataset_name])
self.assertNotEqual(records, [])
dataset.delete()
records = Dataset.find(self.test_dataset_ids[dataset_name])
self.assertEqual(records, [])
self.assertEqual(Observation.encoding(dataset), None)
def test_update(self):
for dataset_name in self.TEST_DATASETS:
dataset = Dataset.create(self.test_dataset_ids[dataset_name])
self.assertFalse('field' in dataset.record)
dataset.update({'field': {'key': 'value'}})
dataset = Dataset.find_one(self.test_dataset_ids[dataset_name])
self.assertTrue('field' in dataset.record)
self.assertEqual(dataset.record['field'], {'key': 'value'})
def test_build_schema(self):
for dataset_name in self.TEST_DATASETS:
dataset = Dataset.create(self.test_dataset_ids[dataset_name])
dataset.build_schema(self.get_data(dataset_name))
# get dataset with new schema
dataset = Dataset.find_one(self.test_dataset_ids[dataset_name])
for key in [
Dataset.CREATED_AT, Dataset.SCHEMA, Dataset.UPDATED_AT]:
self.assertTrue(key in dataset.record.keys())
df_columns = self.get_data(dataset_name).columns.tolist()
seen_columns = []
for column_name, column_attributes in dataset.schema.items():
# check column_name is unique
self.assertFalse(column_name in seen_columns)
seen_columns.append(column_name)
# check column name is only legal chars
self.assertFalse(RE_ENCODED_COLUMN.search(column_name))
# check has require attributes
self.assertTrue(SIMPLETYPE in column_attributes)
self.assertTrue(OLAP_TYPE in column_attributes)
self.assertTrue(Dataset.LABEL in column_attributes)
# check label is an original column
original_col = column_attributes[Dataset.LABEL]
error_msg = '%s not in %s' % (original_col, df_columns)
self.assertTrue(original_col in df_columns, error_msg)
df_columns.remove(column_attributes[Dataset.LABEL])
# check not reserved key
self.assertFalse(column_name == MONGO_ID_ENCODED)
# ensure all columns in df_columns have corresponding stored columns
self.assertTrue(len(df_columns) == 0)
def test_dframe(self):
dataset = Dataset.create(self.test_dataset_ids['good_eats.csv'])
dataset.save_observations(
recognize_dates(self.get_data('good_eats.csv')))
dframe = dataset.dframe()
self.assertTrue(isinstance(dframe, DataFrame))
self.assertTrue(all(self.get_data('good_eats.csv').reindex(
columns=dframe.columns).eq(dframe)))
columns = dframe.columns
# ensure no reserved keys
self.assertFalse(MONGO_ID_ENCODED in columns)
# ensure date is converted
self.assertTrue(isinstance(dframe.submit_date[0], datetime))
def test_count(self):
dataset = Dataset.create(self.test_dataset_ids['good_eats.csv'])
dataset.save_observations(
recognize_dates(self.get_data('good_eats.csv')))
self.assertEqual(len(dataset.dframe()), dataset.count())
| bsd-3-clause |
CVML/pycortex | cortex/blender.py | 2 | 4007 | import os
import struct
import numpy as np
from matplotlib import cm, colors
import bpy.ops
from bpy import context as C
from bpy import data as D
def make_object(pts, polys, name="mesh"):
mesh = D.meshes.new(name)
mesh.from_pydata(pts.tolist(), [], polys.tolist())
obj = D.objects.new(name, mesh)
C.scene.objects.link(obj)
return obj, mesh
def add_vcolor(color, mesh=None, name='color', cmap=cm.RdBu, vmin=None, vmax=None):
if mesh is None:
mesh = C.scene.objects.active.data
elif isinstance(mesh, str):
mesh = D.meshes[mesh]
bpy.ops.object.mode_set(mode='OBJECT')
if color.ndim == 1:
if vmin is None:
vmin = color.min()
if vmax is None:
vmax = color.max()
color = cmap((color - vmin) / (vmax - vmin))[:,:3]
loopidx = np.zeros((len(mesh.loops),), dtype=np.uint32)
mesh.loops.foreach_get('vertex_index', loopidx)
vcolor = mesh.vertex_colors.new(name)
for i, j in enumerate(loopidx):
vcolor.data[i].color = list(color[j])
return vcolor
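# --- Hedged illustration (editor addition, not part of the original module) ---
# The scalar-to-RGB step used inside add_vcolor above, shown without any bpy
# dependency: normalize a per-vertex scalar into [0, 1] and map it through a
# Matplotlib colormap, keeping only the RGB channels.
def _example_scalar_to_rgb(values, cmap=cm.RdBu, vmin=None, vmax=None):
    values = np.asarray(values, dtype=float)
    vmin = values.min() if vmin is None else vmin
    vmax = values.max() if vmax is None else vmax
    return cmap((values - vmin) / (vmax - vmin))[:, :3]   # (n_vertices, 3)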
def add_shapekey(shape, name=None):
bpy.ops.object.shape_key_add()
key = D.shape_keys[-1].key_blocks[-1]
if name is not None:
key.name = name
for i in range(len(key.data)):
key.data[i].co = shape[i]
return key
def cut_data(volumedata, name="retinotopy", projection="nearest", cmap=cm.RdBu, vmin=None, vmax=None, mesh="hemi"):
if isinstance(mesh, str):
mesh = D.meshes[mesh]
mapped = volumedata.map(projection)
if mapped.llen == len(mesh.vertices):
print("left hemisphere")
vcolor = mapped.left
else:
print ("right hemisphere")
vcolor = mapped.right
return add_vcolor(vcolor, mesh=mesh, name=name, cmap=cmap, vmin=vmin, vmax=vmax)
def fs_cut(subject, hemi):
from .freesurfer import get_surf
wpts, polys, curv = get_surf(subject, hemi, 'smoothwm')
ipts, _, _ = get_surf(subject, hemi, 'inflated')
obj, mesh = make_object(wpts, polys, name='hemi')
obj.scale = .1, .1, .1
C.scene.objects.active = obj
bpy.ops.object.shape_key_add()
add_vcolor(curv, mesh, vmin=-.6, vmax=.6, name='curvature')
add_shapekey(ipts, name='inflated')
obj.use_shape_key_edit_mode = True
return mesh
def write_patch(subject, hemi, name, mesh='hemi'):
from .freesurfer import get_paths, write_patch
if isinstance(mesh, str):
mesh = D.meshes[mesh]
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='DESELECT')
C.tool_settings.mesh_select_mode = False, True, False
bpy.ops.mesh.select_non_manifold()
bpy.ops.object.mode_set(mode='OBJECT')
mwall_edge = set()
for edge in mesh.edges:
if edge.select:
mwall_edge.add(edge.vertices[0])
mwall_edge.add(edge.vertices[1])
bpy.ops.object.mode_set(mode='EDIT')
C.tool_settings.mesh_select_mode = True, False, False
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='OBJECT')
seam = set()
for edge in mesh.edges:
if edge.use_seam:
seam.add(edge.vertices[0])
seam.add(edge.vertices[1])
edge.select = True
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_more()
bpy.ops.object.mode_set(mode='OBJECT')
smore = set()
for i, vert in enumerate(mesh.vertices):
if vert.select:
smore.add(i)
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='OBJECT')
fverts = set()
for face in mesh.polygons:
fverts.add(face.vertices[0])
fverts.add(face.vertices[1])
fverts.add(face.vertices[2])
edges = mwall_edge | (smore - seam)
verts = fverts - seam
pts = [(v, D.shape_keys['Key'].key_blocks['inflated'].data[v].co) for v in verts]
write_patch(get_paths(subject, hemi).format(name=name), pts, edges) | bsd-2-clause |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/scipy/signal/windows.py | 7 | 48626 | """The suite of window functions."""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy import special, linalg
from scipy.fftpack import fft
from scipy.lib.six import string_types
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'chebwin',
'slepian', 'cosine', 'hann', 'get_window']
def boxcar(M, sym=True):
"""Return a boxcar or rectangular window.
Included for completeness, this is equivalent to no window at all.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
Whether the window is symmetric. (Has no effect for boxcar.)
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.boxcar(51)
>>> plt.plot(window)
>>> plt.title("Boxcar window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the boxcar window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
return np.ones(M, float)
def triang(M, sym=True):
"""Return a triangular window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.triang(51)
>>> plt.plot(window)
>>> plt.title("Triangular window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the triangular window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(1, (M + 1) // 2 + 1)
if M % 2 == 0:
w = (2 * n - 1.0) / M
w = np.r_[w, w[::-1]]
else:
w = 2 * n / (M + 1.0)
w = np.r_[w, w[-2::-1]]
if not sym and not odd:
w = w[:-1]
return w
def parzen(M, sym=True):
"""Return a Parzen window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.parzen(51)
>>> plt.plot(window)
>>> plt.title("Parzen window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Parzen window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0)
na = np.extract(n < -(M - 1) / 4.0, n)
nb = np.extract(abs(n) <= (M - 1) / 4.0, n)
wa = 2 * (1 - np.abs(na) / (M / 2.0)) ** 3.0
wb = (1 - 6 * (np.abs(nb) / (M / 2.0)) ** 2.0 +
6 * (np.abs(nb) / (M / 2.0)) ** 3.0)
w = np.r_[wa, wb, wa[::-1]]
if not sym and not odd:
w = w[:-1]
return w
def bohman(M, sym=True):
"""Return a Bohman window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bohman(51)
>>> plt.plot(window)
>>> plt.title("Bohman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bohman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
fac = np.abs(np.linspace(-1, 1, M)[1:-1])
w = (1 - fac) * np.cos(np.pi * fac) + 1.0 / np.pi * np.sin(np.pi * fac)
w = np.r_[0, w, 0]
if not sym and not odd:
w = w[:-1]
return w
def blackman(M, sym=True):
r"""
Return a Blackman window.
The Blackman window is a taper formed by using the first three terms of
a summation of cosines. It was designed to have close to the minimal
leakage possible. It is close to optimal, only slightly worse than a
Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/(M-1)) + 0.08 \cos(4\pi n/(M-1))
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the Kaiser window.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackman(51)
>>> plt.plot(window)
>>> plt.title("Blackman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's blackman function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = (0.42 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1)) +
0.08 * np.cos(4.0 * np.pi * n / (M - 1)))
if not sym and not odd:
w = w[:-1]
return w
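# --- Hedged cross-check (editor addition, not part of the original module) ---
# The expression above is the three-term cosine sum
# 0.42 - 0.5*cos(2*pi*n/(M-1)) + 0.08*cos(4*pi*n/(M-1)); in the symmetric
# case it should agree with numpy.blackman and peak at 1.0 for odd M.
def _example_blackman_check(M=51):
    w = blackman(M)
    assert np.allclose(w, np.blackman(M))
    return w.max()   # 1.0 at the centre sample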
def nuttall(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window according to Nuttall.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.nuttall(51)
>>> plt.plot(window)
>>> plt.title("Nuttall window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Nuttall window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.3635819, 0.4891775, 0.1365995, 0.0106411]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac))
if not sym and not odd:
w = w[:-1]
return w
def blackmanharris(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackmanharris(51)
>>> plt.plot(window)
>>> plt.title("Blackman-Harris window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman-Harris window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.35875, 0.48829, 0.14128, 0.01168]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac))
if not sym and not odd:
w = w[:-1]
return w
def flattop(M, sym=True):
"""Return a flat top window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.flattop(51)
>>> plt.plot(window)
>>> plt.title("Flat top window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the flat top window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.2156, 0.4160, 0.2781, 0.0836, 0.0069]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac) +
a[4] * np.cos(4 * fac))
if not sym and not odd:
w = w[:-1]
return w
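# Illustrative sketch (added, not part of SciPy): nuttall, blackmanharris and
# flattop above all follow the same cosine-sum pattern with different
# coefficient lists; this hypothetical helper only makes the shared structure
# explicit, e.g. _general_cosine_sum(51, [0.35875, 0.48829, 0.14128, 0.01168])
# reproduces blackmanharris(51).
def _general_cosine_sum(M, a, sym=True):
    """Window built as sum_k (-1)**k * a[k] * cos(k * 2*pi*n / (M-1))."""
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    odd = M % 2
    if not sym and not odd:
        M = M + 1
    fac = np.arange(0, M) * 2.0 * np.pi / (M - 1.0)
    w = np.zeros(M)
    for k, a_k in enumerate(a):
        w += (-1) ** k * a_k * np.cos(k * fac)
    if not sym and not odd:
        w = w[:-1]
    return w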
def bartlett(M, sym=True):
r"""
Return a Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The triangular window, with the first and last samples equal to zero
and the maximum value normalized to 1 (though the value 1 does not
appear if `M` is even and `sym` is True).
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \frac{2}{M-1} \left(
\frac{M-1}{2} - \left|n - \frac{M-1}{2}\right|
\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The Fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bartlett(51)
>>> plt.plot(window)
>>> plt.title("Bartlett window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's bartlett function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = np.where(np.less_equal(n, (M - 1) / 2.0),
2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
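# Illustrative numeric check (added for exposition): the Notes above state that
# convolution with the Bartlett window amounts to linear interpolation -- the
# window itself is the normalized convolution of two rectangular windows, up to
# its zero-valued end points. Call it manually if desired.
def _demo_bartlett_is_convolved_boxcar(n=5):
    """bartlett(2*n + 1) without its end zeros equals conv(ones(n), ones(n)) / n."""
    tri = np.convolve(np.ones(n), np.ones(n)) / n   # length 2*n - 1, peak value 1
    return np.allclose(bartlett(2 * n + 1)[1:-1], tri)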
def hann(M, sym=True):
r"""
Return a Hann window.
The Hann window is a taper formed by using a raised cosine or sine-squared
with ends that touch zero.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hann window is defined as
.. math:: w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
    The window was named for Julius von Hann, an Austrian meteorologist. It is
also known as the Cosine Bell. It is sometimes erroneously referred to as
the "Hanning" window, from the use of "hann" as a verb in the original
paper and confusion with the very similar Hamming window.
Most references to the Hann window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hann(51)
>>> plt.plot(window)
>>> plt.title("Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hanning function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.5 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
hanning = hann
def barthann(M, sym=True):
"""Return a modified Bartlett-Hann window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.barthann(51)
>>> plt.plot(window)
>>> plt.title("Bartlett-Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett-Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
fac = np.abs(n / (M - 1.0) - 0.5)
w = 0.62 - 0.48 * fac + 0.38 * np.cos(2 * np.pi * fac)
if not sym and not odd:
w = w[:-1]
return w
def hamming(M, sym=True):
r"""Return a Hamming window.
The Hamming window is a taper formed by using a raised cosine with
non-zero endpoints, optimized to minimize the nearest side lobe.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and
is described in Blackman and Tukey. It was recommended for smoothing the
truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hamming(51)
>>> plt.plot(window)
>>> plt.title("Hamming window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hamming window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hamming function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.54 - 0.46 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
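# Illustrative sketch (added, not part of the original module): hann and
# hamming are both raised-cosine windows of the form
# w(n) = alpha - (1 - alpha) * cos(2*pi*n / (M-1)); alpha = 0.5 gives hann and
# alpha = 0.54 gives hamming. The helper below is hypothetical and only meant
# to make that relationship explicit.
def _raised_cosine(M, alpha, sym=True):
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    odd = M % 2
    if not sym and not odd:
        M = M + 1
    n = np.arange(0, M)
    w = alpha - (1.0 - alpha) * np.cos(2.0 * np.pi * n / (M - 1))
    if not sym and not odd:
        w = w[:-1]
    return w
# e.g. np.allclose(_raised_cosine(51, 0.5), hann(51)) and
# np.allclose(_raised_cosine(51, 0.54), hamming(51)) both hold.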
def kaiser(M, beta, sym=True):
r"""Return a Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
beta : float
Shape parameter, determines trade-off between main-lobe width and
side lobe level. As beta gets large, the window narrows.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
\right)/I_0(\beta)
with
.. math:: \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple approximation
to the DPSS window based on Bessel functions.
The Kaiser window is a very good approximation to the Digital Prolate
Spheroidal Sequence, or Slepian window, which is the transform which
maximizes the energy in the main lobe of the window relative to total
energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hann
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.kaiser(51, beta=14)
>>> plt.plot(window)
>>> plt.title(r"Kaiser window ($\beta$=14)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Kaiser window ($\beta$=14)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's kaiser function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
alpha = (M - 1) / 2.0
w = (special.i0(beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)) /
special.i0(beta))
if not sym and not odd:
w = w[:-1]
return w
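# Quick illustrative check (added for exposition): the beta table in the Notes
# above says beta = 0 reproduces a rectangular window; since I0(0) = 1, every
# sample of the Kaiser window then evaluates to 1. Call it manually if desired.
def _demo_kaiser_beta_zero(M=51):
    return np.allclose(kaiser(M, 0.0), np.ones(M))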
def gaussian(M, std, sym=True):
r"""Return a Gaussian window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
std : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.gaussian(51, std=7)
>>> plt.plot(window)
>>> plt.title(r"Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
sig2 = 2 * std * std
w = np.exp(-n ** 2 / sig2)
if not sym and not odd:
w = w[:-1]
return w
def general_gaussian(M, p, sig, sym=True):
r"""Return a window with a generalized Gaussian shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
p : float
Shape parameter. p = 1 is identical to `gaussian`, p = 0.5 is
the same shape as the Laplace distribution.
sig : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The generalized Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} }
the half-power point is at
.. math:: (2 \log(2))^{1/(2 p)} \sigma
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.general_gaussian(51, p=1.5, sig=7)
>>> plt.plot(window)
>>> plt.title(r"Generalized Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Freq. resp. of the gen. Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
w = np.exp(-0.5 * np.abs(n / sig) ** (2 * p))
if not sym and not odd:
w = w[:-1]
return w
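# Illustrative numeric check (added for exposition): evaluating the window
# formula from the Notes above at n = (2*log(2))**(1/(2*p)) * sigma gives
# exp(-log(2)) = 0.5 regardless of p and sigma, which is the half-power point
# quoted in the docstring. Call it manually if desired.
def _demo_general_gaussian_half_power(p=1.5, sig=7.0):
    n_hp = (2.0 * np.log(2.0)) ** (1.0 / (2.0 * p)) * sig
    return np.exp(-0.5 * np.abs(n_hp / sig) ** (2.0 * p))   # -> 0.5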
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
r"""Return a Dolph-Chebyshev window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
at : float
Attenuation (in dB).
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Notes
-----
This window optimizes for the narrowest main lobe width for a given order
`M` and sidelobe equiripple attenuation `at`, using Chebyshev
polynomials. It was originally developed by Dolph to optimize the
directionality of radio antenna arrays.
Unlike most windows, the Dolph-Chebyshev is defined in terms of its
frequency response:
.. math:: W(k) = \frac
{\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}}
{\cosh[M \cosh^{-1}(\beta)]}
where
.. math:: \beta = \cosh \left [\frac{1}{M}
\cosh^{-1}(10^\frac{A}{20}) \right ]
and 0 <= abs(k) <= M-1. A is the attenuation in decibels (`at`).
The time domain window is then generated using the IFFT, so
power-of-two `M` are the fastest to generate, and prime number `M` are
the slowest.
The equiripple condition in the frequency domain creates impulses in the
time domain, which appear at the ends of the window.
References
----------
.. [1] C. Dolph, "A current distribution for broadside arrays which
optimizes the relationship between beam width and side-lobe level",
Proceedings of the IEEE, Vol. 34, Issue 6
.. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter",
American Meteorological Society (April 1997)
http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf
.. [3] F. J. Harris, "On the use of windows for harmonic analysis with the
discrete Fourier transforms", Proceedings of the IEEE, Vol. 66,
No. 1, January 1978
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.chebwin(51, at=100)
>>> plt.plot(window)
>>> plt.title("Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if np.abs(at) < 45:
warnings.warn("This window is not suitable for spectral analysis "
"for attenuation values lower than about 45dB because "
"the equivalent noise bandwidth of a Chebyshev window "
"does not grow monotonically with increasing sidelobe "
"attenuation when the attenuation is smaller than "
"about 45 dB.")
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
# compute the parameter beta
order = M - 1.0
beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
k = np.r_[0:M] * 1.0
x = beta * np.cos(np.pi * k / M)
# Find the window's DFT coefficients
# Use analytic definition of Chebyshev polynomial instead of expansion
# from scipy.special. Using the expansion in scipy.special leads to errors.
p = np.zeros(x.shape)
p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
p[x < -1] = (1 - 2 * (order % 2)) * np.cosh(order * np.arccosh(-x[x < -1]))
p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))
# Appropriate IDFT and filling up
# depending on even/odd M
if M % 2:
w = np.real(fft(p))
n = (M + 1) // 2
w = w[:n]
w = np.concatenate((w[n - 1:0:-1], w))
else:
p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
w = np.real(fft(p))
n = M // 2 + 1
w = np.concatenate((w[n - 1:0:-1], w[1:n]))
w = w / max(w)
if not sym and not odd:
w = w[:-1]
return w
def slepian(M, width, sym=True):
"""Return a digital Slepian (DPSS) window.
Used to maximize the energy concentration in the main lobe. Also called
the digital prolate spheroidal sequence (DPSS).
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
width : float
Bandwidth
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.slepian(51, width=0.3)
>>> plt.plot(window)
>>> plt.title("Slepian (DPSS) window (BW=0.3)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Slepian window (BW=0.3)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if (M * width > 27.38):
raise ValueError("Cannot reliably obtain Slepian sequences for"
" M*width > 27.38.")
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
twoF = width / 2.0
alpha = (M - 1) / 2.0
m = np.arange(0, M) - alpha
n = m[:, np.newaxis]
k = m[np.newaxis, :]
AF = twoF * special.sinc(twoF * (n - k))
[lam, vec] = linalg.eig(AF)
ind = np.argmax(abs(lam), axis=-1)
w = np.abs(vec[:, ind])
w = w / max(w)
if not sym and not odd:
w = w[:-1]
return w
def cosine(M, sym=True):
"""Return a window with a simple cosine shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.cosine(51)
>>> plt.plot(window)
>>> plt.title("Cosine window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the cosine window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
>>> plt.show()
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
w = np.sin(np.pi / M * (np.arange(0, M) + .5))
if not sym and not odd:
w = w[:-1]
return w
def get_window(window, Nx, fftbins=True):
"""
Return a window.
Parameters
----------
window : string, float, or tuple
The type of window to create. See below for more details.
Nx : int
The number of samples in the window.
fftbins : bool, optional
If True, create a "periodic" window ready to use with ifftshift
and be multiplied by the result of an fft (SEE ALSO fftfreq).
Returns
-------
get_window : ndarray
Returns a window of length `Nx` and type `window`
Notes
-----
Window types:
boxcar, triang, blackman, hamming, hann, bartlett, flattop,
parzen, bohman, blackmanharris, nuttall, barthann,
kaiser (needs beta), gaussian (needs std),
general_gaussian (needs power, width),
slepian (needs width), chebwin (needs attenuation)
If the window requires no parameters, then `window` can be a string.
If the window requires parameters, then `window` must be a tuple
with the first argument the string name of the window, and the next
arguments the needed parameters.
If `window` is a floating point number, it is interpreted as the beta
parameter of the kaiser window.
Each of the window types listed above is also the name of
a function that can be called directly to create a window of
that type.
Examples
--------
>>> from scipy import signal
>>> signal.get_window('triang', 7)
array([ 0.25, 0.5 , 0.75, 1. , 0.75, 0.5 , 0.25])
>>> signal.get_window(('kaiser', 4.0), 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
>>> signal.get_window(4.0, 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
"""
sym = not fftbins
try:
beta = float(window)
except (TypeError, ValueError):
args = ()
if isinstance(window, tuple):
winstr = window[0]
if len(window) > 1:
args = window[1:]
elif isinstance(window, string_types):
if window in ['kaiser', 'ksr', 'gaussian', 'gauss', 'gss',
'general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs',
'slepian', 'optimal', 'slep', 'dss',
'chebwin', 'cheb']:
raise ValueError("The '" + window + "' window needs one or "
"more parameters -- pass a tuple.")
else:
winstr = window
else:
raise ValueError("%s as window type is not supported." % str(type(window)))
if winstr in ['blackman', 'black', 'blk']:
winfunc = blackman
elif winstr in ['triangle', 'triang', 'tri']:
winfunc = triang
elif winstr in ['hamming', 'hamm', 'ham']:
winfunc = hamming
elif winstr in ['bartlett', 'bart', 'brt']:
winfunc = bartlett
elif winstr in ['hanning', 'hann', 'han']:
winfunc = hann
elif winstr in ['blackmanharris', 'blackharr', 'bkh']:
winfunc = blackmanharris
elif winstr in ['parzen', 'parz', 'par']:
winfunc = parzen
elif winstr in ['bohman', 'bman', 'bmn']:
winfunc = bohman
elif winstr in ['nuttall', 'nutl', 'nut']:
winfunc = nuttall
elif winstr in ['barthann', 'brthan', 'bth']:
winfunc = barthann
elif winstr in ['flattop', 'flat', 'flt']:
winfunc = flattop
elif winstr in ['kaiser', 'ksr']:
winfunc = kaiser
elif winstr in ['gaussian', 'gauss', 'gss']:
winfunc = gaussian
elif winstr in ['general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs']:
winfunc = general_gaussian
elif winstr in ['boxcar', 'box', 'ones', 'rect', 'rectangular']:
winfunc = boxcar
elif winstr in ['slepian', 'slep', 'optimal', 'dpss', 'dss']:
winfunc = slepian
elif winstr in ['cosine', 'halfcosine']:
winfunc = cosine
elif winstr in ['chebwin', 'cheb']:
winfunc = chebwin
else:
raise ValueError("Unknown window type.")
params = (Nx,) + args + (sym,)
else:
winfunc = kaiser
params = (Nx, beta, sym)
return winfunc(*params)
| apache-2.0 |
matthiasdiener/spack | var/spack/repos/builtin/packages/geopm/package.py | 1 | 4766 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Geopm(AutotoolsPackage):
"""GEOPM is an extensible power management framework targeting HPC.
The GEOPM package provides libgeopm, libgeopmpolicy and applications
geopmctl and geopmpolicy, as well as tools for postprocessing.
GEOPM is designed to be extended for new control algorithms and new
hardware power management features via its plugin infrastructure.
Note: GEOPM interfaces with hardware using Model Specific Registers (MSRs).
    For proper usage make sure MSRs are made available directly or via the
msr-safe kernel module by your administrator."""
homepage = "https://geopm.github.io"
url = "https://github.com/geopm/geopm/releases/download/v0.4.0/geopm-0.4.0.tar.gz"
# Add additional proper versions and checksums here. "spack checksum geopm"
version('0.5.0', '61b454bc74d4606fe84818aef16c1be4')
version('0.4.0', 'd4cc8fffe521296dab379857d7e2064d')
version('0.3.0', '568fd37234396fff134f8d57b60f2b83')
version('master', git='https://github.com/geopm/geopm.git', branch='master')
version('develop', git='https://github.com/geopm/geopm.git', branch='dev')
# Variants reflecting most ./configure --help options
variant('debug', default=False, description='Enable debug.')
variant('coverage', default=False, description='Enable test coverage support, enables debug by default.')
variant('overhead', default=False, description='Enable GEOPM to calculate and display time spent in GEOPM API calls.')
variant('procfs', default=True, description='Enable procfs (disable for OSes not using procfs).')
variant('mpi', default=True, description='Enable MPI dependent components.')
variant('fortran', default=True, description='Build fortran interface.')
variant('doc', default=True, description='Create man pages with ruby-ronn.')
variant('openmp', default=True, description='Build with OpenMP.')
variant('ompt', default=False, description='Use OpenMP Tools Interface.')
variant('hwloc', default=True, description='Build with hwloc.')
variant('gnu-ld', default=False, description='Assume C compiler uses gnu-ld.')
# Added dependencies.
depends_on('m4', type='build')
depends_on('automake', type='build')
depends_on('autoconf', type='build')
depends_on('libtool', type='build')
depends_on('ruby-ronn', type='build', when='+doc')
depends_on('doxygen', type='build', when='+doc')
depends_on('numactl')
depends_on('mpi', when='+mpi')
# TODO: check if hwloc@specific-version still required with future openmpi
depends_on('[email protected]', when='+hwloc')
depends_on('json-c')
depends_on('py-pandas', type='run')
depends_on('py-numpy', type='run')
depends_on('py-natsort', type='run')
depends_on('py-matplotlib', type='run')
parallel = False
def configure_args(self):
args = []
args.extend(self.enable_or_disable('debug'))
args.extend(self.enable_or_disable('coverage'))
args.extend(self.enable_or_disable('overhead'))
args.extend(self.enable_or_disable('procfs'))
args.extend(self.enable_or_disable('mpi'))
args.extend(self.enable_or_disable('fortran'))
args.extend(self.enable_or_disable('doc'))
args.extend(self.enable_or_disable('openmp'))
args.extend(self.enable_or_disable('ompt'))
args.extend(self.with_or_without('hwloc', activation_value='prefix'))
args.extend(self.with_or_without('gnu-ld'))
return args
| lgpl-2.1 |
phoebe-project/phoebe2-docs | development/examples/single_spots.py | 2 | 2832 | #!/usr/bin/env python
# coding: utf-8
# Single Star with Spots
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
# In[1]:
#!pip install -I "phoebe>=2.3,<2.4"
# As always, let's do imports and initialize a logger and a new bundle.
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_star()
# Adding Spots
# ---------------------
# Let's add one spot to our star. Since there is only one star, the spot will automatically attach without needing to provide a component (as is needed in the [binary with spots example](./binary_spots.ipynb)).
# In[3]:
b.add_spot(radius=30, colat=80, long=0, relteff=0.9)
# Spot Parameters
# -----------------
# A spot is defined by the colatitude and longitude of its center, its angular radius, and the ratio of temperature of the spot to the local intrinsic value.
# In[4]:
print(b['spot'])
# The 'colat' parameter defines the colatitude on the star measured from its North (spin) Pole. The 'long' parameter measures the longitude of the spot - with longitude = 0 being defined as pointing towards the observer at t0 for a single star. See the [spots tutorial](../tutorials/spots.ipynb) for more details.
# In[5]:
times = np.linspace(0, 10, 11)
b.set_value('period', 10)
b.add_dataset('mesh', times=times, columns=['teffs'])
# In[6]:
b.run_compute(distortion_method='rotstar', irrad_method='none')
# In[7]:
afig, mplfig = b.plot(x='us', y='vs', fc='teffs',
animate=True, save='single_spots_1.gif', save_kwargs={'writer': 'imagemagick'})
# 
# If we set t0 to 5 instead of zero, then the spot will cross the line-of-sight at t=5 (since the spot's longitude is 0).
# In[8]:
b.set_value('t0', 5)
# In[9]:
b.run_compute(distortion_method='rotstar', irrad_method='none')
# In[10]:
afig, mplfig = b.plot(x='us', y='vs', fc='teffs',
animate=True, save='single_spots_2.gif', save_kwargs={'writer': 'imagemagick'})
# 
# And if we change the inclination to 0, we'll be looking at the north pole of the star. This clearly illustrates the right-handed rotation of the star. At time=t0=5 the spot will now be pointing in the negative y-direction.
# In[11]:
b.set_value('incl', 0)
# In[12]:
b.run_compute(distortion_method='rotstar', irrad_method='none')
# In[13]:
afig, mplfig = b.plot(x='us', y='vs', fc='teffs',
animate=True, save='single_spots_3.gif', save_kwargs={'writer': 'imagemagick'})
# 
# In[ ]:
| gpl-3.0 |
phoebe-project/phoebe2-docs | 2.0/tutorials/t0s.py | 1 | 5043 | #!/usr/bin/env python
# coding: utf-8
# Various t0s
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.0 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.0,<2.1"')
# As always, let's do imports and initialize a logger and a new Bundle. See [Building a System](building_a_system.html) for more details.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
# And let's make our system a little more interesting so that we can discriminate between the various t0s
# In[3]:
b.set_value('sma@binary', 20)
b.set_value('q', 0.8)
b.set_value('ecc', 0.8)
b.set_value('per0', 45)
# t0 Parameters
# ---------------
# **IMPORTANT NOTE**: the definition of the constraint between 't0_supconj' and 't0_perpass' was fixed in the 2.0.3 hotfix release. Earlier versions may give different results in some cases. Please make sure to update to at least 2.0.3. To check your installed version of phoebe, you can do the following:
# In[4]:
print(phoebe.__version__)
# There are three t0 parameters that are available to define an orbit (but only one of which is editable at any given time), as well as a t0 parameter for the entire system. Let's first access the three t0 parameters for our binary orbit.
#
# 't0_supconj' defines the time at which the primary component in our orbit is at superior conjunction. For a binary system in which there are eclipses, this is defined as the primary eclipse. By default this parameter is editable.
# In[5]:
b.get_parameter('t0_supconj', context='component')
# 't0_perpass' defines the time at which both components in our orbit are at periastron passage. By default this parameter is *constrained* by 't0_supconj'. For more details or information on how to change which parameter is editable, see the [Constraints Tutorial](constraints.html).
# In[6]:
b.get_parameter('t0_perpass', context='component')
# In[7]:
b.get_parameter('t0_perpass', context='constraint')
# The 't0_ref' defines the time at which the primary component in our orbit passes an arbitrary reference point. This 't0_ref' is defined in the same way as PHOEBE legacy's 'HJD0' parameter, so is included for convenience translating between the two.
# In[8]:
b.get_parameter('t0_ref', context='component')
# In[9]:
b.get_parameter('t0_ref', context='constraint')
# In addition, there is a single 't0' parameter that is system-wide. This parameter simply defines the time at which **all** parameters are defined and therefore at which all computations start. The value of this parameter begins to play an important role if any parameter is given a time-derivative (see [apsidal motion](apsidal_motion.html) for an example) or when using N-body instead of Keplerian dynamics (coming in a future release).
# In[10]:
b.get_parameter('t0', context='system')
# Influence on Orbits (positions)
# -----------------
# In[11]:
b.add_dataset('orb', times=np.linspace(-1,1,1001))
# In[12]:
b.run_compute(ltte=False)
# To visualize where these times are with respect to the orbits, we can plot the model orbit and highlight the positions of each star at the times defined by these parameters. Note here that the observer is in the **positive** z-direction.
# In[13]:
axs, artists = b.plot(x='xs', y='zs', time='t0_supconj')
# In[14]:
axs, artists = b.plot(x='xs', y='zs', time='t0_perpass')
# In[15]:
axs, artists = b.plot(x='xs', y='zs', time='t0_ref')
# Influence on Phasing
# -----------
# All computations in PHOEBE 2 are done in the time-domain. Times can be translated to phases using any ephemeris available, as well as any of the t0s.
#
# By default (if not passing any options), times will be phased using the outer-most orbit in the system and using 't0_supconj'.
# In[16]:
b.to_phase(0.0)
# In[17]:
b.to_phase(0.0, component='binary', t0='t0_supconj')
# In[18]:
b.to_phase(0.0, component='binary', t0='t0_perpass')
# In[19]:
b.to_phase(0.0, component='binary', t0='t0_ref')
# Similarly, if plotting phases on any axis, passing the 't0' keyword will set the zero-phase accordingly. To see this, let's compute a light curve and phase it with the various t0s shown in the orbits above.
# In[20]:
b.add_dataset('lc', times=np.linspace(0,1,51), ld_func='linear', ld_coeffs=[0.0])
# In[21]:
b.run_compute(ltte=False, irrad_method='none', atm='blackbody')
# In[22]:
axs, artists = b['lc01@model'].plot(x='phases', t0='t0_supconj', xlim=(-0.3,0.3))
# In[23]:
axs, artists = b['lc01@model'].plot(x='phases', t0='t0_perpass', xlim=(-0.3,0.3))
# In[24]:
axs, artists = b['lc01@model'].plot(x='phases', t0='t0_ref', xlim=(-0.3,0.3))
# In[ ]:
| gpl-3.0 |
jaantollander/Pointwise-Convergence | src/plotting/convergence_plotting.py | 8 | 3846 | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.widgets import Button
from src_legacy.analysis.convergence import max_slope
from src_legacy.io.load.load import LoadCsv
from src_legacy.other.settings import timeit
class ConvergencePlot:
"""
Interactive plot for results.
http://bastibe.de/2013-05-30-speeding-up-matplotlib.html
http://stackoverflow.com/questions/29277080/efficient-matplotlib-redrawing
"""
def __init__(self, filename, function):
self.initial = True # Flag
loads = LoadCsv(filename, function)
self.function = function
self.index = loads.errors.index.values
self.errors = loads.errors
self.a = loads.inputs[0]
self.x = loads.inputs[1]
self.fig, self.ax = plt.subplots(figsize=(10, 8))
self.fig.subplots_adjust(bottom=0.2)
self.ax.set(ylim=(10 ** -6, np.max(self.errors.values) + 0.1),
xlim=(self.index.min(), self.index.max()),
xlabel=r'$ p $',
ylabel=r'$ \varepsilon $')
self.line, = self.ax.loglog([], [], lw=1, marker='*')
self.cline, = self.ax.loglog([], [], lw=2, marker='*')
self.background = self.fig.canvas.copy_from_bbox(self.ax.bbox)
# rect = [left, bottom, width, height] in normalized (0, 1) units
xprev = plt.axes([0.44, 0.05, 0.1, 0.075])
xnext = plt.axes([0.56, 0.05, 0.1, 0.075])
bnext = Button(xnext, r'$ x \Rightarrow $')
bprev = Button(xprev, r'$ \Leftarrow x $')
bnext.on_clicked(self.xnext)
bprev.on_clicked(self.xprev)
aprev = plt.axes([0.91, 0.45, 0.08, 0.075])
anext = plt.axes([0.91, 0.55, 0.08, 0.075])
cnext = Button(anext, r'$ a \Rightarrow $')
cprev = Button(aprev, r'$ \Leftarrow a $')
cnext.on_clicked(self.anext)
cprev.on_clicked(self.aprev)
self.index_x = 0
self.index_a = 0
self.line.set_xdata(self.index)
self.draw()
@timeit
def draw(self):
"""
Redraw the axis
"""
a_ = self.a[self.index_a]
x_ = self.x[self.index_x]
data = self.errors[str(a_)]
data = data[str(x_)]
mask = max_slope(data)
convergence = data.iloc[mask]
self.line.set_ydata(data.values)
self.cline.set_data(convergence.index.values, convergence.values)
self.ax.set_title(r'{function}: '.format(function=self.function) +
r'$ a: {}\approx {:.4f} $, '.format(a_, float(a_)) +
r'$ x: {}\approx {:.4f} $ '.format(x_, float(x_)))
if self.initial:
self.fig.canvas.draw()
self.initial = False
plt.show()
else:
self.fig.canvas.restore_region(self.background)
self.ax.draw_artist(self.ax.patch)
self.ax.draw_artist(self.line)
self.ax.draw_artist(self.cline)
# self.fig.canvas.update()
# self.fig.canvas.flush_events()
self.fig.canvas.blit(self.ax.bbox)
def xnext(self, event):
if self.index_x < len(self.x) - 1:
self.index_x += 1
self.draw()
def xprev(self, event):
if self.index_x > -len(self.x):
self.index_x -= 1
self.draw()
def anext(self, event):
if self.index_a < len(self.a) - 1:
self.index_a += 1
self.draw()
def aprev(self, event):
if self.index_a > 0:
self.index_a -= 1
self.draw()
sns.set()
ConvergencePlot('100000_391_1', 'step_function')
| mit |
Shaky156/Shield-Tablet-CPU2.5Ghz-GPU060Mhz-Overclock | scripts/tracing/dma-api/trace.py | 96 | 12420 | """Main program and stuff"""
#from pprint import pprint
from sys import stdin
import os.path
import re
from argparse import ArgumentParser
import cPickle as pickle
from collections import namedtuple
from plotting import plotseries, disp_pic
import smmu
class TracelineParser(object):
"""Parse the needed information out of an ftrace line"""
# <...>-6 [000] d..2 5.287079: dmadebug_iommu_map_page: device=sdhci-tegra.3, addr=0x01048000, size=4096 page=c13e7214 archdata=ed504640
def __init__(self):
self.pattern = re.compile("device=(?P<dev>.*), addr=(?P<addr>.*), size=(?P<size>.*) page=(?P<page>.*) archdata=(?P<archdata>.*)")
def parse(self, args):
args = self.pattern.match(args)
return (args.group("dev"), int(args.group("addr"), 16),
int(args.group("size")), int(args.group("page"), 16),
int(args.group("archdata"), 16))
def biggest_indices(items, n):
"""Return list of indices of n biggest elements in items"""
with_indices = [(x, i) for i, x in enumerate(items)]
ordered = sorted(with_indices)
return [i for x, i in ordered[-n:]]
def by_indices(xs, ids):
"""Get elements from the list xs by their indices"""
return [xs[i] for i in ids]
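# Illustrative sketch (added for exposition, not part of the original tool):
# how the two helpers above combine to keep only the n most active clients.
# For example, with xs = [5, 1, 9, 3] and n = 2, biggest_indices returns
# [0, 2] (the indices of the two largest values, ordered by value), and
# by_indices(xs, [0, 2]) returns [5, 9].
def _demo_biggest(xs=None, n=2):
    if xs is None:
        xs = [5, 1, 9, 3]
    ids = biggest_indices(xs, n)
    return by_indices(xs, ids)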
"""Event represents one input line"""
Event = namedtuple("Event", ["time", "dev", "data", "delta"])
class Trace(object):
def __init__(self, args):
smmu.VERBOSITY = args.verbosity
self._args = args
self.devlist = []
self.events = []
self.metrics = {
"max_peak": self._usage_peak,
"activity_rate": self._usage_activity,
"average_mem": self._usage_avg
}
self.traceliner = TracelineParser()
@staticmethod
def get_metrics():
"""What filter metrics to get max users"""
return ["max_peak", "activity_rate", "average_mem"]
def show(self):
"""Shuffle events around, build plots, and show them"""
if self._args.max_plots:
evs = self.merge_events()
else:
evs = self.events
series, devlist = self.unload(evs)
if not self._args.no_plots:
self.plot(series, devlist)
def _get_usage(self, evs):
"""Return a metric of how active the events in evs are"""
return self.metrics[self._args.max_metric](evs)
def _usage_peak(self, evs):
"""Return the biggest peak"""
return max(e.data for e in evs)
def _usage_activity(self, evs):
"""Return the activity count: simply the length of the event list"""
return len(evs)
def _usage_avg(self, evs):
"""Return the average over all points"""
# FIXME: the data points are not uniform in time, so this might be
# somewhat off.
        return float(sum(e.data for e in evs)) / len(evs)
def merge_events(self):
"""Find out biggest users, keep them and flatten others to a single user"""
sizes = []
dev_evs = []
for i, dev in enumerate(self.devlist):
dev_evs.append([e for e in self.events if e.dev == dev])
sizes.append(self._get_usage(dev_evs[i]))
# indices of the devices
biggestix = biggest_indices(sizes, self._args.max_plots)
print biggestix
is_big = {}
for i, dev in enumerate(self.devlist):
is_big[dev] = i in biggestix
evs = []
for e in self.events:
if not is_big[e.dev]:
e = Event(e.time, "others", e.data, e.delta)
evs.append(e)
self.devlist.append("others")
return evs
def unload(self, events):
"""Prepare the event list for plotting
series ends up as [([time0], [data0]), ([time1], [data1]), ...]
"""
# ([x], [y]) for matplotlib
series = [([], []) for x in self.devlist]
devidx = dict([(d, i) for i, d in enumerate(self.devlist)])
for event in events:
devid = devidx[event.dev]
series[devid][0].append(event.time)
series[devid][1].append(event.data) # self.dev_data(event.dev))
series_out = []
devlist_out = []
for ser, dev in zip(series, self.devlist):
if len(ser[0]) > 0:
series_out.append(ser)
devlist_out.append(dev)
return series_out, devlist_out
def plot(self, series, devlist):
"""Display the plots"""
#series, devlist = flatten_axes(self.series, self.devlist,
# self._args.max_plots)
devinfo = (series, map(str, devlist))
allocfreeinfo = (self.allocsfrees, ["allocd", "freed", "current"])
plotseries(devinfo, allocfreeinfo)
#plotseries(devinfo)
def dev_data(self, dev):
"""what data to plot against time"""
return dev._cur_alloc
def _cache_hash(self, filename):
"""The trace files are probably not of the same size"""
return str(os.path.getsize(filename))
def load_cache(self):
"""Get the trace data from a database file, if one exists"""
has = self._cache_hash(self._args.filename)
try:
cache = open("trace." + has)
except IOError:
pass
else:
self._load_cache(pickle.load(cache))
return True
return False
def save_cache(self):
"""Store the raw trace data to a database"""
data = self._save_cache()
fh = open("trace." + self._cache_hash(self._args.filename), "w")
pickle.dump(data, fh)
def _save_cache(self):
"""Return the internal data that is needed to be pickled"""
return self.events, self.devlist, self.allocsfrees
def _load_cache(self, data):
"""Get the data from an unpickled object"""
self.events, self.devlist, self.allocsfrees = data
def load_events(self):
"""Get the internal data from a trace file or cache"""
if self._args.filename:
if self._args.cache and self.load_cache():
return
fh = open(self._args.filename)
else:
fh = stdin
self.parse(fh)
if self._args.cache and self._args.filename:
self.save_cache()
def parse(self, fh):
"""Parse the trace file in fh, store data to self"""
mems = {}
dev_by_name = {}
devlist = []
buf_owners = {}
events = []
allocsfrees = [([], []), ([], []), ([], [])] # allocs, frees, current
allocs = 0
frees = 0
curbufs = 0
mem_bytes = 1024 * 1024 * 1024
npages = mem_bytes / 4096
ncols = 512
le_pic = [0] * npages
lastupd = 0
for lineidx, line in enumerate(fh):
# no comments
if line.startswith("#"):
continue
taskpid, cpu, flags, timestamp, func, args = line.strip().split(None, 5)
func = func[:-len(":")]
# unneeded events may be there too
if not func.startswith("dmadebug"):
continue
if self._args.verbosity >= 3:
print line.rstrip()
timestamp = float(timestamp[:-1])
if timestamp < self._args.start:
continue
if timestamp >= self._args.end:
break
devname, addr, size, page, archdata = self.traceliner.parse(args)
if self._args.processes:
devname = taskpid.split("-")[0]
mapping = archdata
try:
memmap = mems[mapping]
except KeyError:
memmap = mem(mapping)
mems[mapping] = memmap
try:
dev = dev_by_name[devname]
except KeyError:
dev = smmu.Device(devname, memmap)
dev_by_name[devname] = dev
devlist.append(dev)
allocfuncs = ["dmadebug_map_page", "dmadebug_map_sg", "dmadebug_alloc_coherent"]
freefuncs = ["dmadebug_unmap_page", "dmadebug_unmap_sg", "dmadebug_free_coherent"]
ignfuncs = []
if timestamp-lastupd > 0.1:
# just some debug prints for now
lastupd = timestamp
print lineidx,timestamp
le_pic2 = [le_pic[i:i+ncols] for i in range(0, npages, ncols)]
#disp_pic(le_pic2)
# animating the bitmap would be cool
#for row in le_pic:
# for i, a in enumerate(row):
# pass
#row[i] = 0.09 * a
if func in allocfuncs:
pages = dev_by_name[devname].alloc(addr, size)
for p in pages:
le_pic[p] = 1
buf_owners[addr] = dev_by_name[devname]
allocs += 1
curbufs += 1
allocsfrees[0][0].append(timestamp)
allocsfrees[0][1].append(allocs)
elif func in freefuncs:
if addr not in buf_owners:
if self._args.verbosity >= 1:
print "warning: %s unmapping unmapped %s" % (dev, addr)
buf_owners[addr] = dev
# fixme: move this to bitmap handling
# get to know the owners of bits
# allocs/frees calls should be traced separately from maps?
# map_pages is traced per page :(
if buf_owners[addr] != dev and self._args.verbosity >= 2:
print "note: %s unmapping [%d,%d) mapped by %s" % (
dev, addr, addr+size, buf_owners[addr])
pages = buf_owners[addr].free(addr, size)
for p in pages:
le_pic[p] = 0
frees -= 1
curbufs -= 1
allocsfrees[1][0].append(timestamp)
allocsfrees[1][1].append(frees)
elif func not in ignfuncs:
raise ValueError("unhandled %s" % func)
allocsfrees[2][0].append(timestamp)
allocsfrees[2][1].append(curbufs)
events.append(Event(timestamp, dev, self.dev_data(dev), size))
self.events = events
self.devlist = devlist
self.allocsfrees = allocsfrees
le_pic2 = [le_pic[i:i+ncols] for i in range(0, npages, ncols)]
# FIXME: not quite ready yet
disp_pic(le_pic2)
return
def mem(asid):
"""Create a new memory object for the given asid space"""
SZ_2G = 2 * 1024 * 1024 * 1024
SZ_1M = 1 * 1024 * 1024
# arch/arm/mach-tegra/include/mach/iomap.h TEGRA_SMMU_(BASE|SIZE)
base = 0x80000000
size = SZ_2G - SZ_1M
return smmu.Memory(base, size, asid)
def get_args():
"""Eat command line arguments, return argparse namespace for settings"""
parser = ArgumentParser()
parser.add_argument("filename", nargs="?",
help="trace file dump, stdin if not given")
parser.add_argument("-s", "--start", type=float, default=0,
help="start timestamp")
parser.add_argument("-e", "--end", type=float, default=1e9,
help="end timestamp")
parser.add_argument("-v", "--verbosity", action="count", default=0,
help="amount of extra information: once for warns (dup addrs), "
"twice for notices (different client in map/unmap), "
"three for echoing all back")
parser.add_argument("-p", "--processes", action="store_true",
help="use processes as memory clients instead of devices")
parser.add_argument("-n", "--no-plots", action="store_true",
help="Don't draw the plots, only read the trace")
parser.add_argument("-c", "--cache", action="store_true",
help="Pickle the data and make a cache file for fast reloading")
parser.add_argument("-m", "--max-plots", type=int,
help="Maximum number of clients to show; show biggest and sum others")
parser.add_argument("-M", "--max-metric", choices=Trace.get_metrics(),
default=Trace.get_metrics()[0],
help="Metric to use when choosing clients in --max-plots")
return parser.parse_args()
def main():
args = get_args()
trace = Trace(args)
trace.load_events()
trace.show()
if __name__ == "__main__":
main()
| gpl-2.0 |
rpalovics/Alpenglow | python/test_alpenglow/experiments/test_FmExperiment.py | 2 | 15194 | import alpenglow as prs
import alpenglow.Getter as rs
import alpenglow.experiments
import alpenglow.evaluation
import pandas as pd
import math
import numpy as np
class TestFactorExperiment:
def test_factorExperiment(self):
fmExperiment = alpenglow.experiments.FmExperiment(
top_k=100,
seed=254938879,
dimension=10,
learning_rate=0.05,
negative_rate=10,
user_attributes="python/test_alpenglow/test_data_4_user_attr",
item_attributes="python/test_alpenglow/test_data_4_item_attr"
)
facRankings = fmExperiment.run(
"python/test_alpenglow/test_data_4",
experimentType="online_id",
verbose=True,
exclude_known=True
)
#print([i for i in facRankings["rank"].fillna(101)])
assert facRankings.top_k == 100
desired_ranks = [101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 12.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 2.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 1.0, 101.0, 101.0, 101.0, 38.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 7.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 8.0, 7.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 9.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 58.0, 101.0, 60.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 65.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 9.0, 5.0, 101.0, 101.0, 101.0, 101.0, 9.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 1.0, 101.0, 10.0, 101.0, 101.0, 101.0, 101.0, 101.0, 65.0, 101.0, 101.0, 101.0, 4.0, 101.0, 57.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 13.0, 101.0, 101.0, 101.0, 101.0, 7.0, 101.0, 101.0, 101.0, 101.0, 101.0, 70.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 68.0, 101.0, 101.0, 101.0, 101.0, 17.0, 101.0, 101.0, 101.0, 94.0, 91.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 78.0, 11.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 89.0, 101.0, 101.0, 101.0, 9.0, 101.0, 3.0, 20.0, 101.0, 101.0, 101.0, 5.0, 101.0, 101.0, 101.0, 101.0, 101.0, 70.0, 86.0, 101.0, 101.0, 101.0, 101.0, 18.0, 101.0, 101.0, 101.0, 101.0, 101.0, 21.0, 5.0, 101.0, 101.0, 15.0, 101.0, 94.0, 101.0, 101.0, 101.0, 81.0, 98.0, 101.0, 101.0, 101.0, 101.0, 101.0, 80.0, 101.0, 101.0, 101.0, 101.0, 101.0, 97.0, 101.0, 101.0, 2.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 7.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 27.0, 101.0, 101.0, 101.0, 101.0, 99.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 2.0, 101.0, 101.0, 24.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 1.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 2.0, 101.0, 1.0, 101.0, 12.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 12.0, 2.0, 101.0, 101.0, 101.0, 101.0, 101.0, 15.0, 101.0, 20.0, 101.0, 101.0, 101.0, 27.0, 14.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 11.0, 101.0, 101.0, 8.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 22.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 23.0, 101.0, 101.0, 101.0, 101.0, 6.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 12.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 28.0, 101.0, 101.0, 101.0, 101.0, 4.0, 101.0, 10.0, 101.0, 101.0, 101.0, 101.0, 101.0, 21.0, 101.0, 101.0, 18.0, 101.0, 3.0, 37.0, 101.0, 101.0, 101.0, 14.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 10.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 
30.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 4.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 8.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 3.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 8.0, 101.0, 101.0, 101.0, 101.0, 101.0, 11.0, 101.0, 30.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 2.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 53.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 5.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 4.0, 101.0, 101.0, 101.0, 40.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 38.0, 101.0, 25.0, 101.0, 101.0, 44.0, 101.0, 101.0, 101.0, 7.0, 101.0, 19.0, 101.0, 101.0, 31.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 14.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 21.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 39.0, 101.0, 101.0, 101.0, 101.0, 101.0, 40.0, 101.0, 101.0, 20.0, 101.0, 33.0, 101.0, 40.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 38.0, 101.0, 47.0, 3.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 31.0, 33.0, 101.0, 101.0, 50.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 37.0, 101.0, 101.0, 1.0, 10.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 8.0, 46.0, 101.0, 101.0, 11.0, 26.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 8.0, 37.0, 101.0, 101.0, 10.0, 3.0, 101.0, 101.0, 101.0, 37.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 44.0, 101.0, 101.0, 2.0, 101.0, 101.0, 101.0, 26.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 24.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 33.0, 101.0, 4.0, 101.0, 101.0, 101.0, 101.0, 101.0, 13.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 44.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 12.0, 101.0, 101.0, 101.0, 101.0, 101.0, 6.0, 101.0, 101.0, 36.0, 101.0, 101.0, 101.0, 20.0, 101.0, 44.0, 101.0, 101.0, 101.0, 101.0, 23.0, 101.0, 101.0, 101.0, 24.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 6.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 38.0, 57.0, 7.0, 101.0, 7.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 37.0, 101.0, 101.0, 101.0, 101.0, 101.0, 48.0, 101.0, 101.0, 101.0, 4.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 27.0, 101.0, 101.0, 101.0, 101.0, 101.0]
assert [i for i in facRankings["rank"].fillna(101)] == desired_ranks
def test_factorExperiment2(self):
fmExperiment = alpenglow.experiments.FmExperiment(
top_k=100,
seed=254938879,
dimension=10,
learning_rate=0.05,
negative_rate=10,
user_attributes="python/test_alpenglow/test_data_4_user_attr"
)
facRankings = fmExperiment.run(
"python/test_alpenglow/test_data_4",
experimentType="online_id",
verbose=True,
exclude_known=True
)
assert facRankings.top_k == 100
#print([i for i in facRankings["rank"].fillna(101)])
desired_ranks = [101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 3.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 3.0, 101.0, 101.0, 2.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 23.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 4.0, 101.0, 101.0, 101.0, 33.0, 27.0, 101.0, 101.0, 101.0, 101.0, 101.0, 23.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 4.0, 101.0, 27.0, 23.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 35.0, 101.0, 10.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 33.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 39.0, 101.0, 1.0, 101.0, 101.0, 101.0, 14.0, 101.0, 52.0, 101.0, 101.0, 52.0, 101.0, 101.0, 101.0, 101.0, 19.0, 22.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 68.0, 33.0, 101.0, 101.0, 101.0, 101.0, 64.0, 101.0, 101.0, 62.0, 36.0, 69.0, 57.0, 2.0, 101.0, 5.0, 101.0, 101.0, 101.0, 101.0, 101.0, 75.0, 101.0, 101.0, 101.0, 65.0, 101.0, 37.0, 101.0, 2.0, 101.0, 101.0, 101.0, 101.0, 17.0, 101.0, 101.0, 101.0, 101.0, 10.0, 101.0, 41.0, 53.0, 101.0, 101.0, 45.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 7.0, 30.0, 101.0, 101.0, 101.0, 101.0, 59.0, 60.0, 31.0, 101.0, 101.0, 101.0, 101.0, 55.0, 101.0, 48.0, 101.0, 42.0, 71.0, 101.0, 101.0, 101.0, 101.0, 101.0, 71.0, 73.0, 60.0, 1.0, 37.0, 10.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 84.0, 101.0, 10.0, 101.0, 76.0, 101.0, 101.0, 101.0, 43.0, 101.0, 12.0, 26.0, 101.0, 101.0, 101.0, 47.0, 101.0, 77.0, 101.0, 101.0, 101.0, 93.0, 37.0, 101.0, 101.0, 101.0, 101.0, 31.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 60.0, 101.0, 101.0, 63.0, 101.0, 56.0, 63.0, 101.0, 43.0, 10.0, 66.0, 71.0, 101.0, 101.0, 101.0, 101.0, 96.0, 58.0, 16.0, 101.0, 101.0, 61.0, 101.0, 101.0, 101.0, 13.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 21.0, 38.0, 12.0, 101.0, 101.0, 101.0, 101.0, 74.0, 101.0, 101.0, 2.0, 44.0, 101.0, 78.0, 74.0, 72.0, 101.0, 101.0, 101.0, 80.0, 101.0, 101.0, 1.0, 101.0, 90.0, 16.0, 101.0, 101.0, 101.0, 101.0, 101.0, 87.0, 84.0, 99.0, 10.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 87.0, 101.0, 74.0, 101.0, 49.0, 101.0, 101.0, 101.0, 101.0, 101.0, 26.0, 101.0, 84.0, 74.0, 101.0, 101.0, 38.0, 101.0, 66.0, 16.0, 101.0, 23.0, 101.0, 101.0, 85.0, 47.0, 4.0, 53.0, 101.0, 13.0, 101.0, 101.0, 101.0, 101.0, 13.0, 22.0, 83.0, 101.0, 101.0, 101.0, 37.0, 101.0, 101.0, 82.0, 101.0, 101.0, 10.0, 101.0, 82.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 96.0, 101.0, 13.0, 101.0, 101.0, 101.0, 101.0, 17.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 91.0, 14.0, 101.0, 101.0, 101.0, 76.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 38.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 62.0, 101.0, 3.0, 101.0, 80.0, 101.0, 23.0, 101.0, 101.0, 101.0, 14.0, 101.0, 101.0, 101.0, 29.0, 101.0, 101.0, 1.0, 101.0, 2.0, 72.0, 101.0, 101.0, 70.0, 56.0, 101.0, 89.0, 22.0, 101.0, 5.0, 101.0, 101.0, 101.0, 101.0, 101.0, 45.0, 101.0, 101.0, 101.0, 99.0, 101.0, 101.0, 101.0, 101.0, 101.0, 38.0, 29.0, 67.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 2.0, 101.0, 72.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 73.0, 101.0, 101.0, 101.0, 87.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 80.0, 101.0, 101.0, 42.0, 95.0, 101.0, 101.0, 101.0, 101.0, 101.0, 2.0, 101.0, 99.0, 101.0, 101.0, 91.0, 101.0, 101.0, 101.0, 101.0, 55.0, 101.0, 72.0, 101.0, 101.0, 101.0, 101.0, 101.0, 6.0, 16.0, 21.0, 
101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 74.0, 101.0, 101.0, 101.0, 101.0, 96.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 15.0, 101.0, 101.0, 97.0, 101.0, 101.0, 101.0, 47.0, 101.0, 101.0, 101.0, 33.0, 101.0, 35.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 83.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 81.0, 46.0, 101.0, 101.0, 101.0, 101.0, 65.0, 101.0, 101.0, 101.0, 6.0, 34.0, 101.0, 101.0, 34.0, 84.0, 101.0, 101.0, 91.0, 101.0, 101.0, 101.0, 11.0, 101.0, 98.0, 101.0, 101.0, 101.0, 101.0, 55.0, 101.0, 101.0, 101.0, 101.0, 95.0, 101.0, 101.0, 101.0, 67.0, 101.0, 48.0, 10.0, 101.0, 101.0, 101.0, 101.0, 101.0, 21.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 98.0, 101.0, 101.0, 101.0, 101.0, 101.0, 57.0, 101.0, 101.0, 78.0, 101.0, 101.0, 101.0, 101.0, 101.0, 43.0, 67.0, 36.0, 101.0, 101.0, 101.0, 82.0, 78.0, 47.0, 101.0, 31.0, 79.0, 2.0, 101.0, 4.0, 101.0, 101.0, 101.0, 29.0, 14.0, 101.0, 101.0, 101.0, 36.0, 101.0, 72.0, 101.0, 10.0, 101.0, 101.0, 25.0, 101.0, 46.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 56.0, 101.0, 101.0, 101.0, 49.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 25.0, 94.0, 101.0, 101.0, 101.0, 101.0, 101.0, 85.0, 101.0, 101.0, 54.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 40.0, 55.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 30.0, 101.0, 101.0, 101.0, 4.0, 101.0, 101.0, 4.0, 5.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 63.0, 101.0, 101.0, 101.0, 101.0, 101.0, 82.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 56.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 14.0, 75.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 60.0, 72.0, 101.0, 101.0, 29.0, 20.0, 81.0, 61.0, 45.0, 57.0, 24.0, 101.0, 101.0, 26.0, 45.0, 1.0, 101.0, 101.0, 14.0, 101.0, 101.0, 25.0, 101.0, 101.0, 101.0, 22.0, 101.0, 101.0, 101.0, 101.0, 75.0, 101.0, 101.0, 4.0, 101.0, 101.0, 101.0, 43.0, 42.0, 47.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 33.0, 101.0, 101.0, 101.0, 101.0, 59.0, 101.0, 101.0, 52.0, 98.0, 63.0, 50.0, 9.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 92.0, 101.0, 101.0, 61.0, 101.0, 55.0, 101.0, 101.0, 101.0, 101.0, 25.0, 10.0, 101.0, 101.0, 101.0, 62.0, 93.0, 101.0, 101.0, 98.0, 61.0, 101.0, 60.0, 82.0, 101.0, 101.0, 95.0, 101.0, 83.0, 101.0, 24.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 49.0, 3.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 73.0, 101.0, 101.0, 101.0, 101.0, 101.0, 94.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 34.0, 101.0, 101.0, 101.0, 101.0, 101.0, 94.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 101.0, 12.0, 44.0, 5.0, 101.0, 49.0, 101.0, 101.0, 101.0, 101.0, 70.0, 101.0, 23.0, 101.0, 94.0, 91.0, 101.0, 101.0, 101.0, 73.0, 101.0, 101.0, 30.0, 101.0, 101.0, 101.0, 27.0, 101.0, 101.0, 101.0, 101.0, 72.0, 101.0, 101.0, 101.0, 22.0, 101.0, 101.0, 101.0, 101.0, 101.0, 26.0, 101.0, 101.0, 101.0, 101.0, 101.0, 90.0, 55.0, 71.0, 101.0, 101.0, 101.0, 101.0, 101.0]
assert list(facRankings["rank"].fillna(101)) == desired_ranks
| apache-2.0 |
kpei/cs-rating | wl_model/spcl_case.py | 1 | 1599 | import pandas as pd
import pymc3 as pm
import numpy as np
def fix_teams(h_teams):
h_teams.loc[7723, 'Name'] = 'Morior Invictus'
h_teams.loc[8241, 'Name'] = 'ex-Nitrious'
h_teams.loc[8349, 'Name'] = 'Good People'
h_teams.loc[8008, 'Name'] = 'Grayhound'
h_teams.loc[5293, 'Name'] = 'AVANT'
h_teams.loc[8030, 'Name'] = 'Not Academy'
return h_teams
def prep_pymc_model(n_teams, n_maps):
with pm.Model() as rating_model:
omega = pm.HalfCauchy('omega', 0.5)
tau = pm.HalfCauchy('tau', 0.5)
rating = pm.Normal('rating', 0, omega, shape=n_teams)
theta_tilde = pm.Normal('rate_t', mu=0, sd=1, shape=(n_maps, n_teams))
rating_map = pm.Deterministic('rating | map', rating + tau * theta_tilde)
alpha = pm.Normal('alpha', 1., 0.2)
beta = pm.Normal('beta', 0.5, 0.2)
sigma = pm.HalfCauchy('sigma', 0.5)
return rating_model
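# Illustrative usage sketch (added note; not part of the original module).
# The returned model only defines priors -- the caller is expected to attach
# the match-outcome likelihood before sampling, e.g.:
#
#   model = prep_pymc_model(n_teams=20, n_maps=7)   # placeholder sizes
#   with model:
#       # ... add the observed-data likelihood using rating_map, alpha, beta, sigma ...
#       trace = pm.sample(1000)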
def prep_pymc_time_model(n_teams, n_maps, n_periods):
with pm.Model() as rating_model:
rho = pm.Uniform('rho', -1, 1)
omega = pm.HalfNormal('omega', 0.5)
sigma = pm.HalfNormal('sigma', 0.5)
time_rating = [pm.Normal('rating_0', 0, omega, shape=n_teams)]
theta_tilde = pm.Normal('rate_t', mu=0, sd=1, shape=(n_maps, n_teams))
tau = pm.HalfCauchy('tau', 0.5)
time_rating_map = [pm.Deterministic('rating_0 | map', time_rating[0] + tau * theta_tilde)]
gamma = pm.HalfNormal('gamma', 1.5)
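        # Note (added): the loop below builds an AR(1)-style chain of per-period
        # ratings, rating_i ~ Normal(rho * rating_{i-1}, sigma), each combined
        # with the shared per-map offset tau * theta_tilde.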
for i in np.arange(1, n_periods):
time_rating.append(pm.Normal('rating_'+str(i), rho*time_rating[i-1], sigma, shape=n_teams))
time_rating_map.append(pm.Deterministic('rating_'+str(i)+' | map', time_rating[i] + tau * theta_tilde))
return rating_model | gpl-3.0 |
aiguofer/bokeh | bokeh/models/sources.py | 1 | 11783 | from __future__ import absolute_import
import warnings
from ..core.has_props import abstract
from ..core.properties import Any, Bool, ColumnData, Dict, Enum, Instance, Int, JSON, List, Seq, String
from ..model import Model
from ..util.dependencies import import_optional
from ..util.warnings import BokehUserWarning
from .callbacks import Callback
pd = import_optional('pandas')
@abstract
class DataSource(Model):
''' A base class for data source types.
'''
selected = Dict(String, Dict(String, Any), default={
'0d': {'glyph': None, 'indices': []},
'1d': {'indices': []},
'2d': {}
}, help="""
A dict to indicate selected indices on different dimensions on this DataSource. Keys are:
.. code-block:: python
# selection information for line and patch glyphs
'0d' : {
# the glyph that was selected
'glyph': None
# array with the [smallest] index of the segment of the line that was hit
'indices': []
}
# selection for most (point-like) glyphs, except lines and patches
'1d': {
# indices of the points included in the selection
indices: []
}
# selection information for multiline and patches glyphs
'2d': {
# mapping of indices of the multiglyph to array of glyph indices that were hit
            # e.g. {3: [5, 6], 4: [5]}
}
""")
callback = Instance(Callback, help="""
A callback to run in the browser whenever the selection is changed.
""")
@abstract
class ColumnarDataSource(DataSource):
''' A base class for data source types, which can be mapped onto
a columnar format.
'''
column_names = List(String, help="""
An list of names for all the columns in this DataSource.
""")
class ColumnDataSource(ColumnarDataSource):
''' Maps names of columns to sequences or arrays.
If the ColumnDataSource initializer is called with a single argument that
is a dict or pandas.DataFrame, that argument is used as the value for the
"data" attribute. For example::
ColumnDataSource(mydict) # same as ColumnDataSource(data=mydict)
ColumnDataSource(df) # same as ColumnDataSource(data=df)
.. note::
        There is an implicit assumption that all the columns in a given
        ColumnDataSource have the same length.
'''
data = ColumnData(String, Seq(Any), help="""
Mapping of column names to sequences of data. The data can be, e.g,
Python lists or tuples, NumPy arrays, etc.
""").asserts(lambda _, data: len(set(len(x) for x in data.values())) <= 1,
lambda: warnings.warn("ColumnDataSource's columns must be of the same length", BokehUserWarning))
def __init__(self, *args, **kw):
''' If called with a single argument that is a dict or
pandas.DataFrame, treat that implicitly as the "data" attribute.
'''
if len(args) == 1 and "data" not in kw:
kw["data"] = args[0]
# TODO (bev) invalid to pass args and "data", check and raise exception
raw_data = kw.pop("data", {})
if not isinstance(raw_data, dict):
if pd and isinstance(raw_data, pd.DataFrame):
raw_data = self._data_from_df(raw_data)
else:
raise ValueError("expected a dict or pandas.DataFrame, got %s" % raw_data)
super(ColumnDataSource, self).__init__(**kw)
for name, data in raw_data.items():
self.add(data, name)
@staticmethod
def _data_from_df(df):
''' Create a ``dict`` of columns from a Pandas DataFrame,
suitable for creating a ColumnDataSource.
Args:
df (DataFrame) : data to convert
Returns:
dict(str, list)
'''
index = df.index
new_data = {}
for colname in df:
new_data[colname] = df[colname].tolist()
if index.name:
new_data[index.name] = index.tolist()
elif index.names and not all([x is None for x in index.names]):
new_data["_".join(index.names)] = index.tolist()
else:
new_data["index"] = index.tolist()
return new_data
@classmethod
def from_df(cls, data):
''' Create a ``dict`` of columns from a Pandas DataFrame,
suitable for creating a ColumnDataSource.
Args:
data (DataFrame) : data to convert
Returns:
dict[str, list]
'''
return cls._data_from_df(data)
def to_df(self):
''' Convert this data source to pandas dataframe.
If ``column_names`` is set, use those. Otherwise let Pandas
infer the column names. The ``column_names`` property can be
used both to order and filter the columns.
Returns:
DataFrame
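        Example (added illustration; the column values are placeholders):

            .. code-block:: python

                source = ColumnDataSource(data=dict(foo=[1, 2], bar=[3, 4]))
                source.column_names = ['bar']  # keep (and order) only 'bar'
                df = source.to_df()            # DataFrame with a single 'bar' column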
'''
if not pd:
raise RuntimeError('Pandas must be installed to convert to a Pandas Dataframe')
if self.column_names:
return pd.DataFrame(self.data, columns=self.column_names)
else:
return pd.DataFrame(self.data)
def add(self, data, name=None):
''' Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
If not supplied, generate a name of the form "Series ####"
Returns:
str: the column name used
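        Example (added illustration; the column values are placeholders):

            .. code-block:: python

                source = ColumnDataSource(data=dict(foo=[1, 2, 3]))
                source.add([10, 20, 30])           # auto-named "Series 1"
                source.add([4, 5, 6], name="bar")  # explicit name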
'''
if name is None:
n = len(self.data)
while "Series %d"%n in self.data:
n += 1
name = "Series %d"%n
self.column_names.append(name)
self.data[name] = data
return name
def remove(self, name):
''' Remove a column of data.
Args:
name (str) : name of the column to remove
Returns:
None
.. note::
If the column name does not exist, a warning is issued.
'''
try:
self.column_names.remove(name)
del self.data[name]
except (ValueError, KeyError):
import warnings
warnings.warn("Unable to find column '%s' in data source" % name)
def stream(self, new_data, rollover=None, setter=None):
''' Efficiently update data source columns with new append-only data.
        In cases where it is only necessary to append new data to existing
        columns, this method can efficiently send just the new data, instead
        of requiring the entire data set to be re-sent.
Args:
new_data (dict[str, seq]) : a mapping of column names to sequences of
new data to append to each column.
All columns of the data source must be present in ``new_data``,
with identical-length append data.
rollover (int, optional) : A maximum column size, above which data
from the start of the column begins to be discarded. If None,
then columns will continue to grow unbounded (default: None)
Returns:
None
Raises:
ValueError
Example:
.. code-block:: python
source = ColumnDataSource(data=dict(foo=[], bar=[]))
# has new, identical-length updates for all columns in source
new_data = {
'foo' : [10, 20],
'bar' : [100, 200],
}
source.stream(new_data)
'''
import numpy as np
newkeys = set(new_data.keys())
oldkeys = set(self.data.keys())
if newkeys != oldkeys:
missing = oldkeys - newkeys
extra = newkeys - oldkeys
if missing and extra:
raise ValueError(
"Must stream updates to all existing columns (missing: %s, extra: %s)" % (", ".join(sorted(missing)), ", ".join(sorted(extra)))
)
elif missing:
raise ValueError("Must stream updates to all existing columns (missing: %s)" % ", ".join(sorted(missing)))
else:
raise ValueError("Must stream updates to all existing columns (extra: %s)" % ", ".join(sorted(extra)))
lengths = set()
for x in new_data.values():
if isinstance(x, np.ndarray):
if len(x.shape) != 1:
raise ValueError("stream(...) only supports 1d sequences, got ndarray with size %r" % (x.shape,))
lengths.add(x.shape[0])
else:
lengths.add(len(x))
if len(lengths) > 1:
raise ValueError("All streaming column updates must be the same length")
self.data._stream(self.document, self, new_data, rollover, setter)
def patch(self, patches, setter=None):
''' Efficiently update data source columns at specific locations
If it is only necessary to update a small subset of data in a
ColumnDataSource, this method can be used to efficiently update only
the subset, instead of requiring the entire data set to be sent.
This method should be passed a dictionary that maps column names to
lists of tuples, each of the form ``(index, new_value)``. The value
at the given index for that column will be updated with the new value.
Args:
patches (dict[str, list[tuple]]) : lists of patches for each column.
Returns:
None
Raises:
ValueError
Example:
.. code-block:: python
source = ColumnDataSource(data=dict(foo=[10, 20], bar=[100, 200]))
patches = {
'foo' : [ (0, 1) ],
'bar' : [ (0, 101), (1, 201) ],
}
source.patch(patches)
'''
extra = set(patches.keys()) - set(self.data.keys())
if extra:
raise ValueError("Can only patch existing columns (extra: %s)" % ", ".join(sorted(extra)))
for name, patch in patches.items():
max_ind = max(x[0] for x in patch)
if max_ind >= len(self.data[name]):
raise ValueError("Out-of bounds index (%d) in patch for column: %s" % (max_ind, name))
self.data._patch(self.document, self, patches, setter)
class GeoJSONDataSource(ColumnarDataSource):
'''
'''
geojson = JSON(help="""
GeoJSON that contains features for plotting. Currently GeoJSONDataSource can
only process a FeatureCollection or GeometryCollection.
""")
@abstract
class RemoteSource(ColumnDataSource):
'''
'''
data_url = String(help="""
The URL to the endpoint for the data.
""")
polling_interval = Int(help="""
    Polling interval (in milliseconds) for updating the data source.
""")
class AjaxDataSource(RemoteSource):
'''
'''
method = Enum('POST', 'GET', help="http method - GET or POST")
mode = Enum("replace", "append", help="""
Whether to append new data to existing data (up to ``max_size``),
or to replace existing data entirely.
""")
max_size = Int(help="""
    Maximum size of the data array to keep after each pull request. Beyond
    that size, the data will be right shifted (older values are discarded).
""")
if_modified = Bool(False, help="""
Whether to include an ``If-Modified-Since`` header in AJAX requests
to the server. If this header is supported by the server, then only
new data since the last request will be returned.
""")
content_type = String(default='application/json', help="""
Set the "contentType" parameter for the Ajax request.
""")
http_headers = Dict(String, String, help="""
HTTP headers to set for the Ajax request.
""")
| bsd-3-clause |
kernc/scikit-learn | sklearn/preprocessing/tests/test_label.py | 12 | 17807 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
le.fit(["apple", "orange"])
msg = "bad input shape"
assert_raise_message(ValueError, msg, le.transform, "apple")
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
            # With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
        # With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
Soya93/Extract-Refactoring | python/helpers/pydev/pydev_ipython/matplotlibtools.py | 12 | 5436 |
import sys
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'wx': 'WXAgg',
'qt': 'Qt4Agg', # qt3 not supported
'qt4': 'Qt4Agg',
'osx': 'MacOSX'}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
backend2gui['Qt4Agg'] = 'qt'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
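# Added note (illustrative lookups only, derived from the mappings above):
#   backend2gui.get('TkAgg')  -> 'tk'
#   backend2gui.get('GTK')    -> 'gtk'   (one of the manually added entries)
#   backend2gui.get('Agg')    -> None    (non-interactive backend, no GUI loop)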
def do_enable_gui(guiname):
from _pydev_bundle.pydev_versioncheck import versionok_for_gui
if versionok_for_gui():
try:
from pydev_ipython.inputhook import enable_gui
enable_gui(guiname)
except:
sys.stderr.write("Failed to enable GUI event loop integration for '%s'\n" % guiname)
import traceback
traceback.print_exc()
elif guiname not in ['none', '', None]:
# Only print a warning if the guiname was going to do something
sys.stderr.write("Debug console: Python version does not support GUI event loop integration for '%s'\n" % guiname)
# Return value does not matter, so return back what was sent
return guiname
def find_gui_and_backend():
"""Return the gui and mpl backend."""
matplotlib = sys.modules['matplotlib']
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParams['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
return gui, backend
def is_interactive_backend(backend):
""" Check if backend is interactive """
matplotlib = sys.modules['matplotlib']
from matplotlib.rcsetup import interactive_bk, non_interactive_bk # @UnresolvedImport
if backend in interactive_bk:
return True
elif backend in non_interactive_bk:
return False
else:
return matplotlib.is_interactive()
def patch_use(enable_gui_function):
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_use(*args, **kwargs):
matplotlib.real_use(*args, **kwargs)
gui, backend = find_gui_and_backend()
enable_gui_function(gui)
setattr(matplotlib, "real_use", getattr(matplotlib, "use"))
setattr(matplotlib, "use", patched_use)
def patch_is_interactive():
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_is_interactive():
return matplotlib.rcParams['interactive']
setattr(matplotlib, "real_is_interactive", getattr(matplotlib, "is_interactive"))
setattr(matplotlib, "is_interactive", patched_is_interactive)
def activate_matplotlib(enable_gui_function):
"""Set interactive to True for interactive backends.
enable_gui_function - Function which enables gui, should be run in the main thread.
"""
matplotlib = sys.modules['matplotlib']
gui, backend = find_gui_and_backend()
is_interactive = is_interactive_backend(backend)
if is_interactive:
enable_gui_function(gui)
if not matplotlib.is_interactive():
sys.stdout.write("Backend %s is interactive backend. Turning interactive mode on.\n" % backend)
matplotlib.interactive(True)
else:
if matplotlib.is_interactive():
sys.stdout.write("Backend %s is non-interactive backend. Turning interactive mode off.\n" % backend)
matplotlib.interactive(False)
patch_use(enable_gui_function)
patch_is_interactive()
def flag_calls(func):
"""Wrap a function to detect and flag when it gets called.
This is a decorator which takes a function and wraps it in a function with
a 'called' attribute. wrapper.called is initialized to False.
The wrapper.called attribute is set to False right before each call to the
wrapped function, so if the call fails it remains False. After the call
completes, wrapper.called is set to True and the output is returned.
Testing for truth in wrapper.called allows you to determine if a call to
func() was attempted and succeeded."""
# don't wrap twice
if hasattr(func, 'called'):
return func
def wrapper(*args,**kw):
wrapper.called = False
out = func(*args,**kw)
wrapper.called = True
return out
wrapper.called = False
wrapper.__doc__ = func.__doc__
return wrapper
def activate_pylab():
pylab = sys.modules['pylab']
pylab.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive)
def activate_pyplot():
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pyplot.draw_if_interactive = flag_calls(pyplot.draw_if_interactive)
| apache-2.0 |
fivejjs/GPy | GPy/models/bayesian_gplvm_minibatch.py | 4 | 11292 | # Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from .. import kern
from ..likelihoods import Gaussian
from ..core.parameterization.variational import NormalPosterior, NormalPrior
from ..inference.latent_function_inference.var_dtc_parallel import VarDTC_minibatch
import logging
from GPy.models.sparse_gp_minibatch import SparseGPMiniBatch
from GPy.core.parameterization.param import Param
class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
"""
Bayesian Gaussian Process Latent Variable Model
:param Y: observed data (np.ndarray) or GPy.likelihood
:type Y: np.ndarray| GPy.likelihood instance
:param input_dim: latent dimensionality
:type input_dim: int
:param init: initialisation method for the latent space
:type init: 'PCA'|'random'
"""
def __init__(self, Y, input_dim, X=None, X_variance=None, init='PCA', num_inducing=10,
Z=None, kernel=None, inference_method=None, likelihood=None,
name='bayesian gplvm', normalizer=None,
missing_data=False, stochastic=False, batchsize=1):
self.logger = logging.getLogger(self.__class__.__name__)
if X is None:
from ..util.initialization import initialize_latent
self.logger.info("initializing latent space X with method {}".format(init))
X, fracs = initialize_latent(init, input_dim, Y)
else:
fracs = np.ones(input_dim)
self.init = init
if Z is None:
self.logger.info("initializing inducing inputs")
Z = np.random.permutation(X.copy())[:num_inducing]
assert Z.shape[1] == X.shape[1]
if X_variance == False:
self.logger.info('no variance on X, activating sparse GPLVM')
X = Param("latent space", X)
elif X_variance is None:
self.logger.info("initializing latent space variance ~ uniform(0,.1)")
X_variance = np.random.uniform(0,.1,X.shape)
self.variational_prior = NormalPrior()
X = NormalPosterior(X, X_variance)
if kernel is None:
self.logger.info("initializing kernel RBF")
kernel = kern.RBF(input_dim, lengthscale=1./fracs, ARD=True) #+ kern.Bias(input_dim) + kern.White(input_dim)
if likelihood is None:
likelihood = Gaussian()
self.kl_factr = 1.
if inference_method is None:
from ..inference.latent_function_inference.var_dtc import VarDTC
self.logger.debug("creating inference_method var_dtc")
inference_method = VarDTC(limit=1 if not missing_data else Y.shape[1])
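        # Note (added): VarDTC_GPU is not imported in this module; the check
        # below assumes it is available from GPy's GPU inference code and will
        # raise a NameError if `kernel.useGPU` is True without that import.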
if kernel.useGPU and isinstance(inference_method, VarDTC_GPU):
kernel.psicomp.GPU_direct = True
super(BayesianGPLVMMiniBatch,self).__init__(X, Y, Z, kernel, likelihood=likelihood,
name=name, inference_method=inference_method,
normalizer=normalizer,
missing_data=missing_data, stochastic=stochastic,
batchsize=batchsize)
self.X = X
self.link_parameter(self.X, 0)
def set_X_gradients(self, X, X_grad):
"""Set the gradients of the posterior distribution of X in its specific form."""
X.mean.gradient, X.variance.gradient = X_grad
def get_X_gradients(self, X):
"""Get the gradients of the posterior distribution of X in its specific form."""
return X.mean.gradient, X.variance.gradient
def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, subset_indices=None):
posterior, log_marginal_likelihood, grad_dict, current_values, value_indices = super(BayesianGPLVMMiniBatch, self)._inner_parameters_changed(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm, dL_dKmm=dL_dKmm, subset_indices=subset_indices)
if self.has_uncertain_inputs():
current_values['meangrad'], current_values['vargrad'] = self.kern.gradients_qX_expectations(
variational_posterior=X,
Z=Z, dL_dpsi0=grad_dict['dL_dpsi0'],
dL_dpsi1=grad_dict['dL_dpsi1'],
dL_dpsi2=grad_dict['dL_dpsi2'])
else:
current_values['Xgrad'] = self.kern.gradients_X(grad_dict['dL_dKnm'], X, Z)
current_values['Xgrad'] += self.kern.gradients_X_diag(grad_dict['dL_dKdiag'], X)
if subset_indices is not None:
value_indices['Xgrad'] = subset_indices['samples']
kl_fctr = self.kl_factr
if self.has_uncertain_inputs():
if self.missing_data:
d = self.output_dim
log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(X)/d
else:
log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(X)
# Subsetting Variational Posterior objects, makes the gradients
# empty. We need them to be 0 though:
X.mean.gradient[:] = 0
X.variance.gradient[:] = 0
self.variational_prior.update_gradients_KL(X)
if self.missing_data:
current_values['meangrad'] += kl_fctr*X.mean.gradient/d
current_values['vargrad'] += kl_fctr*X.variance.gradient/d
else:
current_values['meangrad'] += kl_fctr*X.mean.gradient
current_values['vargrad'] += kl_fctr*X.variance.gradient
if subset_indices is not None:
value_indices['meangrad'] = subset_indices['samples']
value_indices['vargrad'] = subset_indices['samples']
return posterior, log_marginal_likelihood, grad_dict, current_values, value_indices
def _outer_values_update(self, full_values):
"""
        Here you put the values which were collected before into the right
        places, e.g. set the gradients of parameters, etc.
"""
super(BayesianGPLVMMiniBatch, self)._outer_values_update(full_values)
if self.has_uncertain_inputs():
self.X.mean.gradient = full_values['meangrad']
self.X.variance.gradient = full_values['vargrad']
else:
self.X.gradient = full_values['Xgrad']
def _outer_init_full_values(self):
if self.has_uncertain_inputs():
return dict(meangrad=np.zeros(self.X.mean.shape),
vargrad=np.zeros(self.X.variance.shape))
else:
return dict(Xgrad=np.zeros(self.X.shape))
def parameters_changed(self):
super(BayesianGPLVMMiniBatch,self).parameters_changed()
if isinstance(self.inference_method, VarDTC_minibatch):
return
def plot_latent(self, labels=None, which_indices=None,
resolution=50, ax=None, marker='o', s=40,
fignum=None, plot_inducing=True, legend=True,
plot_limits=None,
aspect='auto', updates=False, predict_kwargs={}, imshow_kwargs={}):
import sys
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from ..plotting.matplot_dep import dim_reduction_plots
return dim_reduction_plots.plot_latent(self, labels, which_indices,
resolution, ax, marker, s,
fignum, plot_inducing, legend,
plot_limits, aspect, updates, predict_kwargs, imshow_kwargs)
def do_test_latents(self, Y):
"""
Compute the latent representation for a set of new points Y
Notes:
This will only work with a univariate Gaussian likelihood (for now)
"""
N_test = Y.shape[0]
input_dim = self.Z.shape[1]
means = np.zeros((N_test, input_dim))
covars = np.zeros((N_test, input_dim))
dpsi0 = -0.5 * self.input_dim / self.likelihood.variance
dpsi2 = self.grad_dict['dL_dpsi2'][0][None, :, :] # TODO: this may change if we ignore het. likelihoods
V = Y/self.likelihood.variance
#compute CPsi1V
#if self.Cpsi1V is None:
# psi1V = np.dot(self.psi1.T, self.likelihood.V)
# tmp, _ = linalg.dtrtrs(self._Lm, np.asfortranarray(psi1V), lower=1, trans=0)
# tmp, _ = linalg.dpotrs(self.LB, tmp, lower=1)
# self.Cpsi1V, _ = linalg.dtrtrs(self._Lm, tmp, lower=1, trans=1)
dpsi1 = np.dot(self.posterior.woodbury_vector, V.T)
#start = np.zeros(self.input_dim * 2)
from scipy.optimize import minimize
for n, dpsi1_n in enumerate(dpsi1.T[:, :, None]):
args = (input_dim, self.kern.copy(), self.Z, dpsi0, dpsi1_n.T, dpsi2)
res = minimize(latent_cost_and_grad, jac=True, x0=np.hstack((means[n], covars[n])), args=args, method='BFGS')
xopt = res.x
mu, log_S = xopt.reshape(2, 1, -1)
means[n] = mu[0].copy()
covars[n] = np.exp(log_S[0]).copy()
X = NormalPosterior(means, covars)
return X
def dmu_dX(self, Xnew):
"""
Calculate the gradient of the prediction at Xnew w.r.t Xnew.
"""
dmu_dX = np.zeros_like(Xnew)
for i in range(self.Z.shape[0]):
dmu_dX += self.kern.gradients_X(self.grad_dict['dL_dpsi1'][i:i + 1, :], Xnew, self.Z[i:i + 1, :])
return dmu_dX
def dmu_dXnew(self, Xnew):
"""
Individual gradient of prediction at Xnew w.r.t. each sample in Xnew
"""
gradients_X = np.zeros((Xnew.shape[0], self.num_inducing))
ones = np.ones((1, 1))
for i in range(self.Z.shape[0]):
gradients_X[:, i] = self.kern.gradients_X(ones, Xnew, self.Z[i:i + 1, :]).sum(-1)
return np.dot(gradients_X, self.grad_dict['dL_dpsi1'])
def plot_steepest_gradient_map(self, *args, ** kwargs):
"""
See GPy.plotting.matplot_dep.dim_reduction_plots.plot_steepest_gradient_map
"""
import sys
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from ..plotting.matplot_dep import dim_reduction_plots
return dim_reduction_plots.plot_steepest_gradient_map(self,*args,**kwargs)
def latent_cost_and_grad(mu_S, input_dim, kern, Z, dL_dpsi0, dL_dpsi1, dL_dpsi2):
"""
objective function for fitting the latent variables for test points
(negative log-likelihood: should be minimised!)
"""
mu = mu_S[:input_dim][None]
log_S = mu_S[input_dim:][None]
S = np.exp(log_S)
X = NormalPosterior(mu, S)
psi0 = kern.psi0(Z, X)
psi1 = kern.psi1(Z, X)
psi2 = kern.psi2(Z, X)
lik = dL_dpsi0 * psi0.sum() + np.einsum('ij,kj->...', dL_dpsi1, psi1) + np.einsum('ijk,lkj->...', dL_dpsi2, psi2) - 0.5 * np.sum(np.square(mu) + S) + 0.5 * np.sum(log_S)
dLdmu, dLdS = kern.gradients_qX_expectations(dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, X)
dmu = dLdmu - mu
# dS = S0 + S1 + S2 -0.5 + .5/S
dlnS = S * (dLdS - 0.5) + .5
return -lik, -np.hstack((dmu.flatten(), dlnS.flatten()))
| bsd-3-clause |
JingJunYin/tensorflow | tensorflow/contrib/learn/python/learn/estimators/kmeans_test.py | 13 | 20278 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators import kmeans as kmeans_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
def normalize(x):
return x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
def cosine_similarity(x, y):
return np.dot(normalize(x), np.transpose(normalize(y)))
def make_random_centers(num_centers, num_dims, center_norm=500):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * center_norm)
def make_random_points(centers, num_points, max_offset=20):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * max_offset)
return (centers[assignments] + offsets, assignments, np.add.reduce(
offsets * offsets, 1))
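# Added note on how the helpers above are used by the tests (illustrative
# shapes only):
#
#   centers = make_random_centers(num_centers=5, num_dims=2)        # (5, 2)
#   points, assignments, sq_offsets = make_random_points(centers, 1000)
#
# `assignments` holds the index of the generating center for each point and
# `sq_offsets` its squared distance to that center; the tests sum these into
# the expected clustering score.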
class KMeansTestBase(test.TestCase):
def input_fn(self,
batch_size=None,
points=None,
randomize=None,
num_epochs=None):
"""Returns an input_fn that randomly selects batches from given points."""
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
if randomize is None:
randomize = (self.use_mini_batch and
self.mini_batch_steps_per_iteration <= 1)
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return input_lib.limit_epochs(x, num_epochs=num_epochs), None
if randomize:
indices = random_ops.random_uniform(
constant_op.constant([batch_size]),
minval=0,
maxval=num_points - 1,
dtype=dtypes.int32,
seed=10)
else:
# We need to cycle through the indices sequentially. We create a queue
# to maintain the list of indices.
q = data_flow_ops.FIFOQueue(num_points, dtypes.int32, ())
# Conditionally initialize the Queue.
def _init_q():
with ops.control_dependencies(
[q.enqueue_many(math_ops.range(num_points))]):
return control_flow_ops.no_op()
init_q = control_flow_ops.cond(q.size() <= 0, _init_q,
control_flow_ops.no_op)
with ops.control_dependencies([init_q]):
offsets = q.dequeue_many(batch_size)
with ops.control_dependencies([q.enqueue_many(offsets)]):
indices = array_ops.identity(offsets)
batch = array_ops.gather(x, indices)
return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)
return _fn
@staticmethod
def config(tf_random_seed):
return run_config.RunConfig(tf_random_seed=tf_random_seed)
@property
def initial_clusters(self):
return kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT
@property
def batch_size(self):
return self.num_points
@property
def use_mini_batch(self):
return False
@property
def mini_batch_steps_per_iteration(self):
return 1
class KMeansTest(KMeansTestBase):
def setUp(self):
np.random.seed(3)
self.num_centers = 5
self.num_dims = 2
self.num_points = 1000
self.true_centers = make_random_centers(self.num_centers, self.num_dims)
self.points, _, self.scores = make_random_points(self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
def _kmeans(self, relative_tolerance=None):
return kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
random_seed=24,
relative_tolerance=relative_tolerance)
def test_clusters(self):
kmeans = self._kmeans()
kmeans.fit(input_fn=self.input_fn(), steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
kmeans = self._kmeans()
kmeans.fit(input_fn=self.input_fn(), steps=1)
score1 = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
steps = 10 * self.num_points // self.batch_size
kmeans.fit(input_fn=self.input_fn(), steps=steps)
score2 = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertTrue(score1 > score2)
self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.use_mini_batch:
# We don't test for use_mini_batch case since the loss value can be noisy.
return
kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=learn.RunConfig(tf_random_seed=14),
random_seed=12,
relative_tolerance=1e-4)
kmeans.fit(
input_fn=self.input_fn(),
# Force it to train until the relative tolerance monitor stops it.
steps=None)
score = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertNear(self.true_score, score, self.true_score * 0.01)
def _infer_helper(self, kmeans, clusters, num_points):
points, true_assignments, true_offsets = make_random_points(
clusters, num_points)
# Test predict
assignments = list(
kmeans.predict_cluster_idx(input_fn=self.input_fn(
batch_size=num_points, points=points, num_epochs=1)))
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(
input_fn=lambda: (constant_op.constant(points), None), steps=1)
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = kmeans.transform(
input_fn=lambda: (constant_op.constant(points), None))
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1,
keepdims=True) - 2 * np.dot(points, np.transpose(clusters)) +
np.transpose(np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
def test_infer(self):
kmeans = self._kmeans()
# Make a call to fit to initialize the cluster centers.
max_steps = 1
kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
clusters = kmeans.clusters()
# Run inference on small datasets.
self._infer_helper(kmeans, clusters, num_points=10)
self._infer_helper(kmeans, clusters, num_points=1)
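# A minimal sketch of the identity behind `true_transform` in `_infer_helper`
# above: squared Euclidean distances between every point and every center can
# be computed in one shot as
#   ||x - c||^2 = ||x||^2 - 2 * x . c + ||c||^2.
# The NumPy version below is illustrative only and assumes `points` has shape
# [n, d] and `centers` has shape [k, d].
def _pairwise_squared_distances_sketch(points, centers):
  points = np.asarray(points, dtype=np.float64)
  centers = np.asarray(centers, dtype=np.float64)
  points_sq = np.sum(np.square(points), axis=1, keepdims=True)      # [n, 1]
  centers_sq = np.sum(np.square(centers), axis=1, keepdims=True).T  # [1, k]
  # Clamp at zero to guard against small negative values caused by rounding.
  return np.maximum(0., points_sq - 2. * np.dot(points, centers.T) + centers_sq)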
class KMeansTestMultiStageInit(KMeansTestBase):
def test_random(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.fit(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_just_right(self):
points = np.array([[1, 2]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.fit(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_too_small(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
with self.assertRaisesOpError(AssertionError):
kmeans.fit(
input_fn=self.input_fn(batch_size=4, points=points, randomize=False),
steps=1)
class MiniBatchKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansCosineDistanceTest(KMeansTestBase):
def setUp(self):
self.points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2], [0.1, 2.5], [0.2, 2],
[0.1, 3], [0.2, 4]],
dtype=np.float32)
self.num_points = self.points.shape[0]
self.true_centers = np.array(
[
normalize(
np.mean(normalize(self.points)[0:4, :], axis=0, keepdims=True))[
0],
normalize(
np.mean(normalize(self.points)[4:, :], axis=0, keepdims=True))[
0]
],
dtype=np.float32)
self.true_assignments = np.array([0] * 4 + [1] * 4)
self.true_score = len(self.points) - np.tensordot(
normalize(self.points), self.true_centers[self.true_assignments])
self.num_centers = 2
self.kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
def test_fit(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.clusters())
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
def test_transform(self):
self.kmeans.fit(input_fn=self.input_fn(), steps=10)
centers = normalize(self.kmeans.clusters())
true_transform = 1 - cosine_similarity(self.points, centers)
transform = self.kmeans.transform(input_fn=self.input_fn(
batch_size=self.num_points))
self.assertAllClose(transform, true_transform, atol=1e-3)
def test_predict(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.clusters())
assignments = list(
self.kmeans.predict_cluster_idx(input_fn=self.input_fn(
num_epochs=1, batch_size=self.num_points)))
self.assertAllClose(
centers[assignments],
self.true_centers[self.true_assignments],
atol=1e-2)
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
score = self.kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertAllClose(score, self.true_score, atol=1e-2)
def test_predict_kmeans_plus_plus(self):
    # Most points are concentrated near one center. KMeans++ is likely to find
# the less populated centers.
points = np.array(
[[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3], [-3.1, -3.2],
[-2.8, -3.], [-2.9, -3.1], [-3., -3.1], [-3., -3.1], [-3.2, -3.],
[-3., -3.]],
dtype=np.float32)
true_centers = np.array(
[
normalize(
np.mean(normalize(points)[0:2, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[2:4, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[4:, :], axis=0, keepdims=True))[0]
],
dtype=np.float32)
true_assignments = [0] * 2 + [1] * 2 + [2] * 8
true_score = len(points) - np.tensordot(
normalize(points), true_centers[true_assignments])
kmeans = kmeans_lib.KMeansClustering(
3,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
kmeans.fit(input_fn=lambda: (constant_op.constant(points), None), steps=30)
centers = normalize(kmeans.clusters())
self.assertAllClose(
sorted(centers.tolist()), sorted(true_centers.tolist()), atol=1e-2)
def _input_fn():
return (input_lib.limit_epochs(
constant_op.constant(points), num_epochs=1), None)
assignments = list(kmeans.predict_cluster_idx(input_fn=_input_fn))
self.assertAllClose(
centers[assignments], true_centers[true_assignments], atol=1e-2)
score = kmeans.score(
input_fn=lambda: (constant_op.constant(points), None), steps=1)
self.assertAllClose(score, true_score, atol=1e-2)
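# A minimal sketch of the score used by the cosine-distance tests above: after
# unit-normalizing points and centers, the total cosine distance to the
# assigned centers is
#   score = sum_i (1 - x_i . c_{a_i}) = n - sum_i x_i . c_{a_i},
# which is what `len(points) - np.tensordot(...)` computes in `setUp` and
# `test_predict_kmeans_plus_plus`. Illustrative only; assumes `points` is
# [n, d], `centers` is [k, d] and `assignments` is a length-n integer array.
def _cosine_score_sketch(points, centers, assignments):
  points = normalize(points)   # `normalize` is the helper used by these tests.
  centers = normalize(centers)
  return float(len(points) - np.sum(points * centers[assignments]))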
class MiniBatchKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansBenchmark(benchmark.Benchmark):
"""Base class for benchmarks."""
def SetUp(self,
dimension=50,
num_clusters=50,
points_per_cluster=10000,
center_norm=500,
cluster_width=20):
np.random.seed(123456)
self.num_clusters = num_clusters
self.num_points = num_clusters * points_per_cluster
self.centers = make_random_centers(
self.num_clusters, dimension, center_norm=center_norm)
self.points, _, scores = make_random_points(
self.centers, self.num_points, max_offset=cluster_width)
self.score = float(np.sum(scores))
def _report(self, num_iters, start, end, scores):
print(scores)
self.report_benchmark(
iters=num_iters,
wall_time=(end - start) / num_iters,
extras={'true_sum_squared_distances': self.score,
'fit_scores': scores})
def _fit(self, num_iters=10):
pass
def benchmark_01_2dim_5center_500point(self):
self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
self._fit()
def benchmark_02_20dim_20center_10kpoint(self):
self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
self._fit()
def benchmark_03_100dim_50center_50kpoint(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
self._fit()
def benchmark_03_100dim_50center_50kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=50,
points_per_cluster=1000,
cluster_width=250)
self._fit()
def benchmark_04_100dim_500center_500kpoint(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
self._fit(num_iters=4)
def benchmark_05_100dim_500center_500kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=500,
points_per_cluster=1000,
cluster_width=250)
self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting tensorflow KMeans: %d' % i)
tf_kmeans = kmeans_lib.KMeansClustering(
self.num_clusters,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
random_seed=i * 42,
relative_tolerance=1e-6,
config=run_config.RunConfig(tf_random_seed=3))
tf_kmeans.fit(
input_fn=lambda: (constant_op.constant(self.points), None), steps=50)
_ = tf_kmeans.clusters()
scores.append(
tf_kmeans.score(
input_fn=lambda: (constant_op.constant(self.points), None),
steps=1))
self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting sklearn KMeans: %d' % i)
sklearn_kmeans = SklearnKMeans(
n_clusters=self.num_clusters,
init='k-means++',
max_iter=50,
n_init=1,
tol=1e-4,
random_state=i * 42)
sklearn_kmeans.fit(self.points)
scores.append(sklearn_kmeans.inertia_)
self._report(num_iters, start, time.time(), scores)
class KMeansTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(
capacity=10, dtypes=dtypes.float32, shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(queue, [enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
  # Note that since cluster initialization is dependent on the inputs, if the
  # input is generated using a QueueRunner, one has to make sure that these
  # runners are started before the initialization.
def test_queues(self):
kmeans = kmeans_lib.KMeansClustering(5)
kmeans.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/tests/indexes/datetimes/test_datetimelike.py | 9 | 2526 | """ generic tests from the Datetimelike class """
import numpy as np
import pandas as pd
from pandas.util import testing as tm
from pandas import Series, Index, DatetimeIndex, date_range
from ..datetimelike import DatetimeLike
class TestDatetimeIndex(DatetimeLike):
_holder = DatetimeIndex
def setup_method(self, method):
self.indices = dict(index=tm.makeDateIndex(10))
self.setup_indices()
def create_index(self):
return date_range('20130101', periods=5)
def test_shift(self):
# test shift for datetimeIndex and non datetimeIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05',
'2013-01-06'], freq='D')
tm.assert_index_equal(result, expected)
result = drange.shift(-1)
expected = DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
'2013-01-03', '2013-01-04'],
freq='D')
tm.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D')
expected = DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
'2013-01-10',
'2013-01-11'], freq='D')
tm.assert_index_equal(result, expected)
def test_pickle_compat_construction(self):
pass
def test_intersection(self):
first = self.index
second = self.index[5:]
intersect = first.intersection(second)
assert tm.equalContents(intersect, second)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
assert tm.equalContents(result, second)
third = Index(['a', 'b', 'c'])
result = first.intersection(third)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_union(self):
first = self.index[:5]
second = self.index[5:]
everything = self.index
union = first.union(second)
assert tm.equalContents(union, everything)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.union(case)
assert tm.equalContents(result, everything)
| mit |
mattgiguere/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
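# A quick numeric sketch of the same idea: fitting PCA on the cloud above
# yields explained-variance ratios whose last entry should be close to zero,
# i.e. the cloud is nearly flat along one direction.
pca_check = PCA(n_components=3).fit(np.c_[a, b, c])
print("explained variance ratios:", pca_check.explained_variance_ratio_)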
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
tiagofrepereira2012/tensorflow | tensorflow/python/estimator/canned/linear_testing_utils.py | 14 | 66739 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for testing linear estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import run_config
from tensorflow.python.estimator.canned import linear
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import optimizer
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver
from tensorflow.python.training import session_run_hook
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
# pylint rules which are disabled by default for test files.
# pylint: disable=invalid-name,protected-access,missing-docstring
# Names of variables created by model.
AGE_WEIGHT_NAME = 'linear/linear_model/age/weights'
HEIGHT_WEIGHT_NAME = 'linear/linear_model/height/weights'
BIAS_NAME = 'linear/linear_model/bias_weights'
LANGUAGE_WEIGHT_NAME = 'linear/linear_model/language/weights'
def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
expected = ops.convert_to_tensor(expected, name='expected')
actual = ops.convert_to_tensor(actual, name='actual')
rdiff = math_ops.abs(expected - actual, 'diff') / math_ops.abs(expected)
rtol = ops.convert_to_tensor(rtol, name='rtol')
return check_ops.assert_less(
rdiff,
rtol,
data=('Condition expected =~ actual did not hold element-wise:'
'expected = ', expected, 'actual = ', actual, 'rdiff = ', rdiff,
'rtol = ', rtol,),
name=scope)
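# Note on `assert_close` above: it enforces a *relative* tolerance, i.e. it
# checks |expected - actual| / |expected| < rtol element-wise. A rough NumPy
# equivalent of the same check (illustrative only) would be
#   np.all(np.abs(expected - actual) / np.abs(expected) < rtol).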
def save_variables_to_ckpt(model_dir):
init_all_op = [variables.global_variables_initializer()]
with tf_session.Session() as sess:
sess.run(init_all_op)
saver.Saver().save(sess, os.path.join(model_dir, 'model.ckpt'))
def queue_parsed_features(feature_map):
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
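# Note on `queue_parsed_features` above: it pushes the tensors produced by
# `parsing_ops.parse_example` through a FIFOQueue driven by a QueueRunner and
# returns the dequeued tensors keyed like the input dict. The integration
# tests below use it to turn serialized tf.Example protos into train/eval/
# predict input_fns.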
def sorted_key_dict(unsorted_dict):
return {k: unsorted_dict[k] for k in sorted(unsorted_dict)}
def sigmoid(x):
return 1 / (1 + np.exp(-1.0 * x))
class CheckPartitionerVarHook(session_run_hook.SessionRunHook):
"""A `SessionRunHook` to check a partitioned variable."""
def __init__(self, test_case, var_name, var_dim, partitions):
self._test_case = test_case
self._var_name = var_name
self._var_dim = var_dim
self._partitions = partitions
def begin(self):
with variable_scope.variable_scope(
variable_scope.get_variable_scope()) as scope:
scope.reuse_variables()
partitioned_weight = variable_scope.get_variable(
self._var_name, shape=(self._var_dim, 1))
self._test_case.assertTrue(
isinstance(partitioned_weight, variables.PartitionedVariable))
for part in partitioned_weight:
self._test_case.assertEqual(self._var_dim // self._partitions,
part.get_shape()[0])
class BaseLinearRegressorPartitionerTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def testPartitioner(self):
x_dim = 64
partitions = 4
def _partitioner(shape, dtype):
del dtype # unused; required by Fn signature.
# Only partition the embedding tensor.
return [partitions, 1] if shape[0] == x_dim else [1]
regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.categorical_column_with_hash_bucket(
'language', hash_bucket_size=x_dim),),
partitioner=_partitioner,
model_dir=self._model_dir)
def _input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english', 'spanish'],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}, [[10.]]
hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim,
partitions)
regressor.train(input_fn=_input_fn, steps=1, hooks=[hook])
def testDefaultPartitionerWithMultiplePsReplicas(self):
partitions = 2
# This results in weights larger than the default partition size of 64M,
# so partitioned weights are created (each weight uses 4 bytes).
x_dim = 32 << 20
class FakeRunConfig(run_config.RunConfig):
@property
def num_ps_replicas(self):
return partitions
# Mock the device setter as ps is not available on test machines.
with test.mock.patch.object(
estimator,
'_get_replica_device_setter',
return_value=lambda _: '/cpu:0'):
linear_regressor = self._linear_regressor_fn(
feature_columns=(
feature_column_lib.categorical_column_with_hash_bucket(
'language', hash_bucket_size=x_dim),),
config=FakeRunConfig(),
model_dir=self._model_dir)
def _input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english', 'spanish'],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}, [[10.]]
hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim,
partitions)
linear_regressor.train(input_fn=_input_fn, steps=1, hooks=[hook])
# TODO(b/36813849): Add tests with dynamic shape inputs using placeholders.
class BaseLinearRegressorEvaluationTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_evaluation_for_simple_data(self):
with ops.Graph().as_default():
variables.Variable([[11.0]], name=AGE_WEIGHT_NAME)
variables.Variable([2.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(
input_fn=lambda: ({'age': ((1,),)}, ((10.,),)), steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10. Loss is 3**2 = 9.
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 9.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_batch(self):
"""Tests evaluation for batch_size==2."""
with ops.Graph().as_default():
variables.Variable([[11.0]], name=AGE_WEIGHT_NAME)
variables.Variable([2.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(
input_fn=lambda: ({'age': ((1,), (1,))}, ((10.,), (10.,))), steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the sum over batch = 9 + 9 = 18
# Average loss is the average over batch = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 18.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_weights(self):
"""Tests evaluation with weights."""
with ops.Graph().as_default():
variables.Variable([[11.0]], name=AGE_WEIGHT_NAME)
variables.Variable([2.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
def _input_fn():
features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}
labels = ((10.,), (10.,))
return features, labels
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
weight_column='weights',
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(input_fn=_input_fn, steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the weighted sum over batch = 9 + 2*9 = 27
    # Average loss is the weighted average = (9 + 2*9) / (1 + 2) = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 27.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_for_multi_dimensions(self):
x_dim = 3
label_dim = 2
with ops.Graph().as_default():
variables.Variable(
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name=AGE_WEIGHT_NAME)
variables.Variable([7.0, 8.0], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column(
'age', shape=(x_dim,)),),
label_dimension=label_dim,
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={
'age': np.array([[2., 4., 5.]]),
},
y=np.array([[46., 58.]]),
batch_size=1,
num_epochs=None,
shuffle=False)
eval_metrics = linear_regressor.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())
# Logit is
# [2., 4., 5.] * [1.0, 2.0] + [7.0, 8.0] = [39, 50] + [7.0, 8.0]
# [3.0, 4.0]
# [5.0, 6.0]
# which is [46, 58]
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
def test_evaluation_for_multiple_feature_columns(self):
with ops.Graph().as_default():
variables.Variable([[10.0]], name=AGE_WEIGHT_NAME)
variables.Variable([[2.0]], name=HEIGHT_WEIGHT_NAME)
variables.Variable([5.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
batch_size = 2
feature_columns = [
feature_column_lib.numeric_column('age'),
feature_column_lib.numeric_column('height')
]
input_fn = numpy_io.numpy_input_fn(
x={'age': np.array([20, 40]),
'height': np.array([4, 8])},
y=np.array([[213.], [421.]]),
batch_size=batch_size,
num_epochs=None,
shuffle=False)
est = self._linear_regressor_fn(
feature_columns=feature_columns, model_dir=self._model_dir)
eval_metrics = est.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())
# Logit is [(20. * 10.0 + 4 * 2.0 + 5.0), (40. * 10.0 + 8 * 2.0 + 5.0)] =
# [213.0, 421.0], while label is [213., 421.]. Loss = 0.
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
class BaseLinearRegressorPredictTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_1d(self):
"""Tests predict when all variables are one-dimensional."""
with ops.Graph().as_default():
variables.Variable([[10.]], name='linear/linear_model/x/weights')
variables.Variable([.2], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('x'),),
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[2.]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = linear_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# x * weight + bias = 2. * 10. + .2 = 20.2
self.assertAllClose([[20.2]], predicted_scores)
def testMultiDim(self):
"""Tests predict when all variables are multi-dimenstional."""
batch_size = 2
label_dimension = 3
x_dim = 4
feature_columns = (feature_column_lib.numeric_column('x', shape=(x_dim,)),)
with ops.Graph().as_default():
variables.Variable( # shape=[x_dim, label_dimension]
[[1., 2., 3.], [2., 3., 4.], [3., 4., 5.], [4., 5., 6.]],
name='linear/linear_model/x/weights')
variables.Variable( # shape=[label_dimension]
[.2, .4, .6], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
# x shape=[batch_size, x_dim]
x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predictions = linear_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# score = x * weight + bias, shape=[batch_size, label_dimension]
self.assertAllClose([[30.2, 40.4, 50.6], [70.2, 96.4, 122.6]],
predicted_scores)
def testTwoFeatureColumns(self):
"""Tests predict with two feature columns."""
with ops.Graph().as_default():
variables.Variable([[10.]], name='linear/linear_model/x0/weights')
variables.Variable([[20.]], name='linear/linear_model/x1/weights')
variables.Variable([.2], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1')),
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x0': np.array([[2.]]),
'x1': np.array([[3.]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = linear_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# x0 * weight0 + x1 * weight1 + bias = 2. * 10. + 3. * 20 + .2 = 80.2
self.assertAllClose([[80.2]], predicted_scores)
class BaseLinearRegressorIntegrationTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, label_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['predictions'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
    # Pandas DataFrame naturally supports 1-dim data only.
label_dimension = 1
input_dimension = label_dimension
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum[:label_dimension])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
class BaseLinearRegressorTrainingTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s/part_0:0' % AGE_WEIGHT_NAME,
'%s/part_0:0' % BIAS_NAME
]
def _minimize(loss, global_step=None, var_list=None):
trainable_vars = var_list or ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
if global_step is not None:
return state_ops.assign_add(global_step, 1).op
return control_flow_ops.no_op()
assert_loss = assert_close(
math_ops.to_float(expected_loss, name='expected'),
loss,
name='assert_loss')
with ops.control_dependencies((assert_loss,)):
if global_step is not None:
return state_ops.assign_add(global_step, 1).op
return control_flow_ops.no_op()
mock_optimizer = test.mock.NonCallableMock(
spec=optimizer.Optimizer,
wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(self,
expected_global_step,
expected_age_weight=None,
expected_bias=None):
shapes = {
name: shape
for (name, shape) in checkpoint_utils.list_variables(self._model_dir)
}
self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
self.assertEqual(expected_global_step,
checkpoint_utils.load_variable(self._model_dir,
ops.GraphKeys.GLOBAL_STEP))
self.assertEqual([1, 1], shapes[AGE_WEIGHT_NAME])
if expected_age_weight is not None:
self.assertEqual(expected_age_weight,
checkpoint_utils.load_variable(self._model_dir,
AGE_WEIGHT_NAME))
self.assertEqual([1], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertEqual(expected_bias,
checkpoint_utils.load_variable(self._model_dir,
BIAS_NAME))
def testFromScratchWithDefaultOptimizer(self):
# Create LinearRegressor.
label = 5.
age = 17
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self._assert_checkpoint(num_steps)
def testTrainWithOneDimLabel(self):
label_dimension = 1
batch_size = 20
feature_columns = [feature_column_lib.numeric_column('age', shape=(1,))]
est = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(200)
def testTrainWithOneDimWeight(self):
label_dimension = 1
batch_size = 20
feature_columns = [feature_column_lib.numeric_column('age', shape=(1,))]
est = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
weight_column='w',
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1,
'w': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(200)
def testFromScratch(self):
# Create LinearRegressor.
label = 5.
age = 17
# loss = (logits - label)^2 = (0 - 5.)^2 = 25.
mock_optimizer = self._mock_optimizer(expected_loss=25.)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
expected_global_step=num_steps,
expected_age_weight=0.,
expected_bias=0.)
def testFromCheckpoint(self):
# Create initial checkpoint.
age_weight = 10.0
bias = 5.0
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable([[age_weight]], name=AGE_WEIGHT_NAME)
variables.Variable([bias], name=BIAS_NAME)
variables.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = age * age_weight + bias = 17 * 10. + 5. = 175
# loss = (logits - label)^2 = (175 - 5)^2 = 28900
mock_optimizer = self._mock_optimizer(expected_loss=28900.)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({'age': ((17,),)}, ((5.,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
def testFromCheckpointMultiBatch(self):
# Create initial checkpoint.
age_weight = 10.0
bias = 5.0
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable([[age_weight]], name=AGE_WEIGHT_NAME)
variables.Variable([bias], name=BIAS_NAME)
variables.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = age * age_weight + bias
# logits[0] = 17 * 10. + 5. = 175
# logits[1] = 15 * 10. + 5. = 155
# loss = sum(logits - label)^2 = (175 - 5)^2 + (155 - 3)^2 = 52004
mock_optimizer = self._mock_optimizer(expected_loss=52004.)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({'age': ((17,), (15,))}, ((5.,), (3.,))),
steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
class BaseLinearClassifierTrainingTest(object):
def __init__(self, linear_classifier_fn):
self._linear_classifier_fn = linear_classifier_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s/part_0:0' % AGE_WEIGHT_NAME,
'%s/part_0:0' % BIAS_NAME
]
def _minimize(loss, global_step):
trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(
expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
return state_ops.assign_add(global_step, 1).op
assert_loss = assert_close(
math_ops.to_float(expected_loss, name='expected'),
loss,
name='assert_loss')
with ops.control_dependencies((assert_loss,)):
return state_ops.assign_add(global_step, 1).op
mock_optimizer = test.mock.NonCallableMock(
spec=optimizer.Optimizer,
wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(
self, n_classes, expected_global_step, expected_age_weight=None,
expected_bias=None):
logits_dimension = n_classes if n_classes > 2 else 1
shapes = {
name: shape for (name, shape) in
checkpoint_utils.list_variables(self._model_dir)
}
self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
self.assertEqual(
expected_global_step,
checkpoint_utils.load_variable(
self._model_dir, ops.GraphKeys.GLOBAL_STEP))
self.assertEqual([1, logits_dimension],
shapes[AGE_WEIGHT_NAME])
if expected_age_weight is not None:
self.assertAllEqual(expected_age_weight,
checkpoint_utils.load_variable(
self._model_dir,
AGE_WEIGHT_NAME))
self.assertEqual([logits_dimension], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertAllEqual(expected_bias,
checkpoint_utils.load_variable(
self._model_dir, BIAS_NAME))
def _testFromScratchWithDefaultOptimizer(self, n_classes):
label = 0
age = 17
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self._assert_checkpoint(n_classes, num_steps)
def testBinaryClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=2)
def testMultiClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=4)
def _testTrainWithTwoDimsLabel(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_2,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=2)
def testMultiClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=4)
def _testTrainWithOneDimLabel(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=2)
def testMultiClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=4)
def _testTrainWithTwoDimsWeight(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1, 'w': data_rank_2}, y=data_rank_1,
batch_size=batch_size, num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=2)
def testMultiClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=4)
def _testTrainWithOneDimWeight(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1, 'w': data_rank_1}, y=data_rank_1,
batch_size=batch_size, num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=2)
def testMultiClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=4)
def _testFromScratch(self, n_classes):
label = 1
age = 17
    # For the binary classifier:
# loss = sigmoid_cross_entropy(logits, label) where logits=0 (weights are
# all zero initially) and label = 1 so,
# loss = 1 * -log ( sigmoid(logits) ) = 0.69315
    # For the multi-class classifier:
# loss = cross_entropy(logits, label) where logits are all 0s (weights are
# all zero initially) and label = 1 so,
# loss = 1 * -log ( 1.0 / n_classes )
    # For this particular test case, as the logits are the same, the formula
    # 1 * -log ( 1.0 / n_classes ) covers both the binary and multi-class cases.
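    # As a concrete check of the formula above: for n_classes == 2 the loss is
    # -log(sigmoid(0)) = -log(0.5) ~= 0.69315, and for n_classes == 4 it is
    # -log(1.0 / 4) ~= 1.38629.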
mock_optimizer = self._mock_optimizer(
expected_loss=-1 * math.log(1.0/n_classes))
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=num_steps,
expected_age_weight=[[0.]] if n_classes == 2 else [[0.] * n_classes],
expected_bias=[0.] if n_classes == 2 else [.0] * n_classes)
def testBinaryClassesFromScratch(self):
self._testFromScratch(n_classes=2)
def testMultiClassesFromScratch(self):
self._testFromScratch(n_classes=4)
def _testFromCheckpoint(self, n_classes):
# Create initial checkpoint.
label = 1
age = 17
    # For the binary case, the expected weight has shape (1, 1). For the
    # multi-class case, the shape is (1, n_classes). To test the weights, set
    # them to 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (
np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
    # For the binary classifier:
# logits = age * age_weight + bias = 17 * 2. - 35. = -1.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss = 1 * -log ( sigmoid(-1) ) = 1.3133
    # For the multi-class classifier:
# loss = cross_entropy(logits, label)
# where logits = 17 * age_weight + bias and label = 1
# so, loss = 1 * -log ( soft_max(logits)[1] )
if n_classes == 2:
expected_loss = 1.3133
else:
logits = age_weight * age + bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[0, label])
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
def testBinaryClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=2)
def testMultiClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=4)
def _testFromCheckpointFloatLabels(self, n_classes):
"""Tests float labels for binary classification."""
# Create initial checkpoint.
if n_classes > 2:
return
label = 0.8
age = 17
age_weight = [[2.0]]
bias = [-35.0]
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = age * age_weight + bias = 17 * 2. - 35. = -1.
# loss = sigmoid_cross_entropy(logits, label)
# => loss = -0.8 * log(sigmoid(-1)) -0.2 * log(sigmoid(+1)) = 1.1132617
mock_optimizer = self._mock_optimizer(expected_loss=1.1132617)
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
def testBinaryClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=2)
def testMultiClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=4)
def _testFromCheckpointMultiBatch(self, n_classes):
# Create initial checkpoint.
label = [1, 0]
age = [17, 18.5]
    # For the binary case, the expected weight has shape (1, 1). For the
    # multi-class case, the shape is (1, n_classes). To test the weights, set
    # them to 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (
np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
    # For the binary classifier:
# logits = age * age_weight + bias
# logits[0] = 17 * 2. - 35. = -1.
# logits[1] = 18.5 * 2. - 35. = 2.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss[0] = 1 * -log ( sigmoid(-1) ) = 1.3133
# loss[1] = (1 - 0) * -log ( 1- sigmoid(2) ) = 2.1269
    # For the multi-class classifier:
# loss = cross_entropy(logits, label)
# where logits = [17, 18.5] * age_weight + bias and label = [1, 0]
# so, loss = 1 * -log ( soft_max(logits)[label] )
if n_classes == 2:
expected_loss = (1.3133 + 2.1269)
else:
logits = age_weight * np.reshape(age, (2, 1)) + bias
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': (age)}, (label)),
steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
def testBinaryClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=2)
def testMultiClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=4)
class BaseLinearClassifierEvaluationTest(object):
def __init__(self, linear_classifier_fn):
self._linear_classifier_fn = linear_classifier_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_evaluation_for_simple_data(self, n_classes):
label = 1
age = 1.
    # For the binary case, the expected weight has shape (1, 1). For the
    # multi-class case, the shape is (1, n_classes). To test the weights, set
    # them to -11.0 * range(n_classes).
age_weight = [[-11.0]] if n_classes == 2 else (
np.reshape(-11.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-30.0] if n_classes == 2 else [-30.0] * n_classes
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
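    # model_dir points at the checkpoint written above, so the classifier evaluates
    # with the hand-set weights at global_step 100.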
est = self._linear_classifier_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=1)
if n_classes == 2:
      # Binary classes: logits = 1. * -11. - 30. = -41, so
      # loss = sigmoid_cross_entropy(-41, label=1) = -log(sigmoid(-41)) ~= 41.
expected_metrics = {
metric_keys.MetricKeys.LOSS: 41.,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: 41.,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.,
metric_keys.MetricKeys.LABEL_MEAN: 1.,
metric_keys.MetricKeys.ACCURACY_BASELINE: 1,
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 1.,
}
else:
# Multi classes: loss = 1 * -log ( soft_max(logits)[label] )
logits = age_weight * age + bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[0, label])
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=2)
def test_multi_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=4)
def _test_evaluation_batch(self, n_classes):
"""Tests evaluation for batch_size==2."""
label = [1, 0]
age = [17., 18.]
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (
np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': (age)}, (label)), steps=1)
if n_classes == 2:
# Logits are (-1., 1.) labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133
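      # Both rows contribute the same value because 1 - sigmoid(1) == sigmoid(-1) ~= 0.2689
      # and -log(0.2689) ~= 1.3133; the mean prediction is (sigmoid(-1) + sigmoid(1)) / 2 = 0.5.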
expected_loss = 1.3133 * 2
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.5,
metric_keys.MetricKeys.LABEL_MEAN: 0.5,
metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 0.25,
}
else:
# Multi classes: loss = 1 * -log ( soft_max(logits)[label] )
logits = age_weight * np.reshape(age, (2, 1)) + bias
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=2)
def test_multi_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=4)
def _test_evaluation_weights(self, n_classes):
"""Tests evaluation with weights."""
label = [1, 0]
age = [17., 18.]
weights = [1., 2.]
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (
np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
weight_column='w',
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': (age), 'w': (weights)}, (label)), steps=1)
if n_classes == 2:
# Logits are (-1., 1.) labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133
# weights = [1., 2.]
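      # LOSS is the weight-scaled sum 1 * 1.3133 + 2 * 1.3133; LOSS_MEAN divides by
      # the total weight of 3.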
expected_loss = 1.3133 * (1. + 2.)
loss_mean = expected_loss / (1.0 + 2.0)
label_mean = np.average(label, weights=weights)
logits = [-1, 1]
logistics = sigmoid(np.array(logits))
predictions_mean = np.average(logistics, weights=weights)
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: predictions_mean,
metric_keys.MetricKeys.LABEL_MEAN: label_mean,
metric_keys.MetricKeys.ACCURACY_BASELINE: (
max(label_mean, 1-label_mean)),
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 0.1668,
}
else:
# Multi classes: unweighted_loss = 1 * -log ( soft_max(logits)[label] )
logits = age_weight * np.reshape(age, (2, 1)) + bias
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
loss_mean = np.average([expected_loss_0, expected_loss_1],
weights=weights)
expected_loss = loss_mean * np.sum(weights)
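      # As in the binary case, LOSS is the weighted sum of per-row losses and
      # LOSS_MEAN is the weighted average.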
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=2)
def test_multi_classes_evaluation_weights(self):
    self._test_evaluation_weights(n_classes=4)


class BaseLinearClassifierPredictTest(object):
def __init__(self, linear_classifier_fn):
self._linear_classifier_fn = linear_classifier_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _testPredications(self, n_classes, label_vocabulary, label_output_fn):
"""Tests predict when all variables are one-dimensional."""
age = 1.
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[-11.0]] if n_classes == 2 else (
np.reshape(-11.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [10.0] if n_classes == 2 else [10.0] * n_classes
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
label_vocabulary=label_vocabulary,
n_classes=n_classes,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'age': np.array([[age]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = list(est.predict(input_fn=predict_input_fn))
if n_classes == 2:
scalar_logits = np.asscalar(
np.reshape(np.array(age_weight) * age + bias, (1,)))
two_classes_logits = [0, scalar_logits]
two_classes_logits_exp = np.exp(two_classes_logits)
softmax = two_classes_logits_exp / two_classes_logits_exp.sum()
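      # softmax([0, z]) == [1 - sigmoid(z), sigmoid(z)], so the binary head's
      # 'probabilities' are consistent with the 'logistic' value below.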
expected_predictions = {
'class_ids': [0],
'classes': [label_output_fn(0)],
'logistic': [sigmoid(np.array(scalar_logits))],
'logits': [scalar_logits],
'probabilities': softmax,
}
else:
onedim_logits = np.reshape(np.array(age_weight) * age + bias, (-1,))
class_ids = onedim_logits.argmax()
logits_exp = np.exp(onedim_logits)
softmax = logits_exp / logits_exp.sum()
expected_predictions = {
'class_ids': [class_ids],
'classes': [label_output_fn(class_ids)],
'logits': onedim_logits,
'probabilities': softmax,
}
self.assertEqual(1, len(predictions))
# assertAllClose cannot handle byte type.
self.assertEqual(expected_predictions['classes'], predictions[0]['classes'])
expected_predictions.pop('classes')
predictions[0].pop('classes')
self.assertAllClose(sorted_key_dict(expected_predictions),
sorted_key_dict(predictions[0]))
def testBinaryClassesWithoutLabelVocabulary(self):
n_classes = 2
self._testPredications(n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testBinaryClassesWithLabelVocabulary(self):
n_classes = 2
self._testPredications(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i)
for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
def testMultiClassesWithoutLabelVocabulary(self):
n_classes = 4
self._testPredications(
n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testMultiClassesWithLabelVocabulary(self):
n_classes = 4
self._testPredications(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i)
for i in range(n_classes)],
        label_output_fn=lambda x: ('class_vocab_%s' % x).encode())


class BaseLinearClassifierIntegrationTest(object):
def __init__(self, linear_classifier_fn):
self._linear_classifier_fn = linear_classifier_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn,
predict_input_fn, input_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = self._linear_classifier_fn(
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['classes'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, 1), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def _test_numpy_input_fn(self, n_classes):
"""Tests complete flow with numpy_input_fn."""
input_dimension = 4
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size)
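    # All labels are 1; the flow test below only checks the train/evaluate/predict/
    # export plumbing, not model quality.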
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=2)
def test_multi_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=4)
def _test_pandas_input_fn(self, n_classes):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
    # A pandas DataFrame naturally supports 1-dim data only.
input_dimension = 1
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
target = np.array([1, 0, 1, 0], dtype=np.int32)
x = pd.DataFrame({'x': data})
y = pd.Series(target)
prediction_length = 4
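    # The DataFrame has 4 rows, so predict() should yield 4 results even though
    # batch_size is 10.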
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=2)
def test_multi_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=4)
def _test_input_fn_from_parse_example(self, n_classes):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size, dtype=np.int64)
serialized_examples = []
for x, y in zip(data, target):
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=x)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[y])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
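    # The input_fns below parse the serialized tf.Example protos; the eval and
    # predict variants wrap them in limit_epochs so each runs for a single epoch.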
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=2)
def test_multi_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=4)
| apache-2.0 |