repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes) |
---|---|---|---|---|---|
Astroua/ammonia | am_hist.py | 1 | 2925 | # coding: utf-8
import os
import glob
import numpy as np
import pylab as py
import pyspeckit as psk
from astropy.io import fits
import matplotlib.pyplot as plt
#fileNames = glob.glob('./nh3/*fits')
#fileNames = glob.glob('./nh3/GSerpBolo3*.n*.fits')
fileNames = glob.glob('./nh3/G010*.n*.fits')
objects = [os.path.basename(f)[0:-9] for f in fileNames]  # strip the '.nXX.fits' suffix; iterate over every file (the original range(max(a)) dropped the last one)
objects = sorted(set(objects))
c = 3E8  # speed of light [m/s], used below to convert frequency to velocity
fnameT = './hist_figs/histogram_tkin.png'
# creates an empty array, to store tkin values into for histogram
htkin = []
for thisObject in objects:
spect2 = {}
if os.path.exists('./nh3/'+thisObject+'.n11.fits'):
data1 = fits.getdata('./nh3/'+thisObject+'.n11.fits')
A1 = np.arange(len(data1['DATA'].T))
nu1 = data1['CDELT1']*(A1-data1['CRPIX1']+1)+data1['CRVAL1']  # frequency of each channel from the linear FITS WCS keywords
v1 = c*(nu1/data1['RESTFREQ']-1)  # express the offset from the rest frequency as a velocity in m/s
spec11 = psk.Spectrum(data=(data1['DATA'].T).squeeze(),xarr=v1,xarrkwargs={'unit':'m/s','refX':data1['RESTFREQ']/1E6,'refX_units':'MHz','xtype':'VLSR-RAD'})
spect2['oneone'] = spec11
if os.path.exists('./nh3/'+thisObject+'.n22.fits'):
data2 = fits.getdata('./nh3/'+thisObject+'.n22.fits')
A2 = np.arange(len(data2['DATA'].T))
nu2 = data2['CDELT1']*(A2 - data2['CRPIX1'] + 1) + data2['CRVAL1']
v2 = c*(nu2/data2['RESTFREQ']-1)
spec22 = psk.Spectrum(data=(data2['DATA'].T).squeeze(),xarr=v2,xarrkwargs={'unit':'m/s','refX':data2['RESTFREQ']/1E6,'refX_units':'MHz','xtype':'VLSR-RAD'})
spect2['twotwo'] = spec22
if os.path.exists('./nh3/'+thisObject+'.n33.fits'):
data3 = fits.getdata('./nh3/'+thisObject+'.n33.fits')
A3 = np.arange(len(data3['DATA'].T))
nu3 = data3['CDELT1']*(A3 - data3['CRPIX1'] + 1) + data3['CRVAL1']
v3 = c*(nu3/data3['RESTFREQ']-1)
spec33 = psk.Spectrum(data=(data3['DATA'].T).squeeze(),xarr=v3,xarrkwargs={'unit':'m/s','refX':data3['RESTFREQ']/1E6,'refX_units':'MHz','xtype':'VLSR-RAD'})
spect2['threethree'] = spec33
if os.path.exists('./nh3/'+thisObject+'.n44.fits'):
data4 = fits.getdata('./nh3/'+thisObject+'.n44.fits')
A4 = np.arange(len(data4['DATA'].T))
nu4 = data4['CDELT1']*(A4 - data4['CRPIX1'] + 1) + data4['CRVAL1']  # use the channel index A4 (the original used the unrelated global 'a')
v4 = c*(nu4/data4['RESTFREQ']-1)
spec44 = psk.Spectrum(data=(data4['DATA'].T).squeeze(),xarr=v4,xarrkwargs={'unit':'m/s','refX':data4['RESTFREQ']/1E6,'refX_units':'MHz','xtype':'VLSR-RAD'})
spect2['fourfour'] = spec44
spdict1,spectra1 = psk.wrappers.fitnh3.fitnh3tkin(spect2,dobaseline=False)
fitp = spectra1.specfit.modelpars
# the first value of the fit-parameter array is tkin; store it in htkin
htkin.append(fitp[0])
# this creates the histogram
plt.clf()
py.hist(htkin,bins=30)
plt.xlabel('Kinetic Temperature (K)')
plt.title('Histogram of T_k of all .fits Files')
plt.savefig(fnameT, format='png')
plt.close()
| mit |
AtsushiSakai/PythonRobotics | Mapping/kmeans_clustering/kmeans_clustering.py | 1 | 3611 | """
Object clustering with k-means algorithm
author: Atsushi Sakai (@Atsushi_twi)
"""
import math
import matplotlib.pyplot as plt
import random
# k means parameters
MAX_LOOP = 10  # maximum number of k-means iterations
DCOST_TH = 0.1  # convergence threshold on the change in total cost between iterations
show_animation = True
def kmeans_clustering(rx, ry, nc):
clusters = Clusters(rx, ry, nc)
clusters.calc_centroid()
pre_cost = float("inf")
for loop in range(MAX_LOOP):
print("loop:", loop)
cost = clusters.update_clusters()
clusters.calc_centroid()
d_cost = abs(cost - pre_cost)
if d_cost < DCOST_TH:
break
pre_cost = cost
return clusters
class Clusters:
def __init__(self, x, y, n_label):
self.x = x
self.y = y
self.n_data = len(self.x)
self.n_label = n_label
self.labels = [random.randint(0, n_label - 1)
for _ in range(self.n_data)]
self.center_x = [0.0 for _ in range(n_label)]
self.center_y = [0.0 for _ in range(n_label)]
def plot_cluster(self):
for label in set(self.labels):
x, y = self._get_labeled_x_y(label)
plt.plot(x, y, ".")
def calc_centroid(self):
for label in set(self.labels):
x, y = self._get_labeled_x_y(label)
n_data = len(x)
self.center_x[label] = sum(x) / n_data
self.center_y[label] = sum(y) / n_data
def update_clusters(self):
cost = 0.0
for ip in range(self.n_data):
px = self.x[ip]
py = self.y[ip]
dx = [icx - px for icx in self.center_x]
dy = [icy - py for icy in self.center_y]
dist_list = [math.hypot(idx, idy) for (idx, idy) in zip(dx, dy)]
min_dist = min(dist_list)
min_id = dist_list.index(min_dist)
self.labels[ip] = min_id
cost += min_dist
return cost
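# Note: `cost` is the sum of Euclidean point-to-centroid distances
# (math.hypot), not squared distances, so DCOST_TH thresholds the change in
# that sum between iterations.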
def _get_labeled_x_y(self, target_label):
x = [self.x[i] for i, label in enumerate(self.labels) if label == target_label]
y = [self.y[i] for i, label in enumerate(self.labels) if label == target_label]
return x, y
def calc_raw_data(cx, cy, n_points, rand_d):
rx, ry = [], []
for (icx, icy) in zip(cx, cy):
for _ in range(n_points):
rx.append(icx + rand_d * (random.random() - 0.5))
ry.append(icy + rand_d * (random.random() - 0.5))
return rx, ry
def update_positions(cx, cy):
# object moving parameters
DX1 = 0.4
DY1 = 0.5
DX2 = -0.3
DY2 = -0.5
cx[0] += DX1
cy[0] += DY1
cx[1] += DX2
cy[1] += DY2
return cx, cy
def main():
print(__file__ + " start!!")
cx = [0.0, 8.0]
cy = [0.0, 8.0]
n_points = 10
rand_d = 3.0
n_cluster = 2
sim_time = 15.0
dt = 1.0
time = 0.0
while time <= sim_time:
print("Time:", time)
time += dt
# objects moving simulation
cx, cy = update_positions(cx, cy)
raw_x, raw_y = calc_raw_data(cx, cy, n_points, rand_d)
clusters = kmeans_clustering(raw_x, raw_y, n_cluster)
# for animation
if show_animation: # pragma: no cover
plt.cla()
# for stopping simulation with the esc key.
plt.gcf().canvas.mpl_connect('key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
clusters.plot_cluster()
plt.plot(cx, cy, "or")
plt.xlim(-2.0, 10.0)
plt.ylim(-2.0, 10.0)
plt.pause(dt)
print("Done")
if __name__ == '__main__':
main()
| mit |
Vimos/scikit-learn | examples/svm/plot_custom_kernel.py | 93 | 1562 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
(2 0)
k(X, Y) = X ( ) Y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
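# Intuition (a note on the kernel above, not part of the original example):
# with M = diag(2, 1) this is the plain dot product after the linear feature
# map phi(x) = (sqrt(2)*x1, x2), since phi(x).phi(y) = 2*x1*y1 + x2*y2,
# i.e. the kernel simply up-weights the first feature.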
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired, edgecolors='k')
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
CharlesGulian/Deconv | create_coadd.py | 1 | 4720 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 18 16:29:44 2016
@author: charlesgulian
"""
# Co-adding program
# Load a list of images; create a co-added image whose pixels equal the median
# pixel of the corresponding pixels of each image in the list
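# (Using the median rather than the mean makes the co-add robust to outliers,
# e.g. cosmic-ray hits or artifacts present in only a few frames.)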
import os
os.chdir('/Users/annepstein/Work/Deconv')
curr_dir = os.getcwd()
import glob
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
# ===============================================================================
# Getting image file paths
# Image directory:
image_dir = '/home/DATA/STRIPE82_330-360_AlignCropped/test7'
image_dir = os.path.join(curr_dir,'AstroImages','Good')
image_files = glob.glob(os.path.join(image_dir,'*alignCropped.fits'))
# ===============================================================================
# Generating co-add
# Do not load more than 0.05 GB of data at a time (change this later to 1 GB) on Tesla machine
max_dir_size = 0.05 # GB
# Find size of directory
image_file_sizes = []
for i in range(len(image_files)):
file_size = os.path.getsize(image_files[i]) # Bytes
# Convert to gigabytes:
temp = float(file_size)/float((2**30))
image_file_sizes.append(temp)
directory_size = sum(image_file_sizes)
# ===============================================================================
# Computing optimal allocation of memory for computing co-add
image_dimensions = [1600,1600]
if directory_size > max_dir_size:
print 'Cannot load all images simultaneously; must compute co-add iteratively'
num_pix = image_dimensions[0]*image_dimensions[1]
# Computing maximum NxN pixel sub-region of images that can be loaded simultaneously to compute co-add
# * Note that this calculation assumes the file size of each image corresponds directly to # pixels in image
proportion = max_dir_size/directory_size
# At any time, we can load this proportion of the pixels in each image
# It is more secure to load slightly less than this number of pixels at any time
# To make computations easier, we will load the nearest square factor of 1600x1600 (num_pix)
M = 1.
while 1./(M**2) >= proportion:
M += 1.
N = np.ceil(float(image_dimensions[0])/M)
num_pix_maxload = N*N
if num_pix_maxload > proportion*num_pix:
print 'Error: maximum memory usage exceeded'
print N
else:
M = 1.
N = image_dimensions[0]
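# Worked example with hypothetical numbers: if the folder held 0.5 GB of
# images, proportion = 0.05/0.5 = 0.1; the while-loop stops at M = 4
# (1/4**2 = 0.0625 < 0.1), so N = ceil(1600/4) = 400 and each pass loads a
# 400x400 sub-region of every image (160,000 <= 0.1*1600*1600 pixels).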
# ===============================================================================
# Creating co-add
# From here, write a loop to iteratively load and save an NxN portion of the image
# Take median of all pixels iteratively and save bins of co-add
# Then stitch together to create co-add
# Define empty array for new co-added image
coadd_image = np.zeros(image_dimensions)
# Iterate through each of MxM regions of image:
M = int(M)  # M was accumulated as a float; use it as an integer loop bound
for i in range(M):
for j in range(M):
# Create dictionaries for storing data
index_dict = {}
imageBin_dict = {}
# Define indices by which to slice the (i,j)th bin of image
xBinSize,yBinSize = N,N
# Save indices
indices = [int(np.floor(float(i)*xBinSize)),int(np.ceil(float(i+1)*xBinSize)),int(np.floor(float(j)*yBinSize)),int(np.ceil(float(j+1)*yBinSize))]
index_dict[i,j] = indices
for p in range(len(image_files)):
# Select image
image_file = image_files[p]
# Get data
image = fits.getdata(image_file)
# Slice out the (i,j)th bin
imageBin = image[indices[0]:indices[1],indices[2]:indices[3]]
# Save the data from this bin
imageBin_dict[p] = imageBin
# Delete image data to save memory
del image
# Define empty array for (i,j)th bin
coadd_bin = np.zeros([indices[1]-indices[0],indices[3]-indices[2]])
# Create pixel-wise co-add
for v in range(indices[1]-indices[0]):
for w in range(indices[3]-indices[2]):
# Create vector for (v,w)th pixel of each image
pixel_vector = []
for p in range(len(image_files)):
pixel = imageBin_dict[p][v,w]
pixel_vector.append(pixel)
# Find median of pixel vector, add to co-added image array
coadd_pixel = np.median(pixel_vector)
coadd_bin[v,w] = coadd_pixel
coadd_image[indices[0]:indices[1],indices[2]:indices[3]] = coadd_bin
print coadd_image
coadd_image_file = os.path.join(curr_dir,'AstroImages','Coadd','custom_coadd.fits')
fits.writeto(coadd_image_file,coadd_image,clobber=True)
| gpl-3.0 |
SANDAG/pydefm | combined_simulation.py | 2 | 13010 | from flask import Flask
from flask import request, redirect
from flask import render_template
import os
import luigi
import shutil
import defm_luigi as defm
import inc_luigi as inc
import emp_luigi as emp
from db import extract
import pandas
import luigi.contrib.hadoop
from sqlalchemy import create_engine
from pysandag.database import get_connection_string
import pandas as pd
from db import sql
from pysandag import database
import warnings
warnings.filterwarnings('ignore', category=pandas.io.pytables.PerformanceWarning)
defm_engine = create_engine(get_connection_string("model_config.yml", 'output_database'))
db_connection_string = database.get_connection_string('model_config.yml', 'in_db')
sql_in_engine = create_engine(db_connection_string)
in_query = getattr(sql, 'max_run_id')
db_run_id = pd.read_sql(in_query, defm_engine, index_col=None)
run_id = db_run_id['id'].iloc[0]
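# run_id is the id of the most recent simulation run; the population, race
# and employment summary queries below all filter on this value.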
class CombinedSimulation(luigi.Task):
start = luigi.Parameter()
end = luigi.Parameter()
dem = luigi.Parameter()
econ = luigi.Parameter()
def requires(self):
return {'def': defm.Iter(start=self.start, end=self.end, dem=self.dem, econ=self.econ),
'inc': inc.IncomeByType(econ=self.econ, dem=self.dem),
'emp': emp.PersonalIncome(econ=self.econ, dem=self.dem)}
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
print 'Completed combined simulation'
app = Flask(__name__)
@app.route('/')
def my_form():
econ_sim_ids = extract.create_df('econ_sim_ids', 'econ_sim_ids_table', rate_id=0, index=None)
dem_sim_ids = extract.create_df('dem_sim_ids', 'dem_sim_ids_table', rate_id=0, index=None)
dems = zip(dem_sim_ids['demographic_simulation_id'], dem_sim_ids['desc_short'])
econs = econ_sim_ids['economic_simulation_id'].tolist()
startyear = range(2011, 2050)
endyear = range(2012, 2051)
return render_template("my-form.html", result1=dems, result2=econs, startyear=startyear, endyear=endyear)
@app.route('/', methods=['POST'])
def my_form_post():
dem = request.form['dem']
econ = request.form['econ']
start_year = request.form['starty']
end_year = request.form['endy']
# os.system("luigid")
luigi.run(main_task_cls=CombinedSimulation, cmdline_args=['--start=' + str(start_year), '--end=' + str(end_year), '--dem=' + str(dem), '--econ=' + str(econ)])
return redirect('/bokeh')
@app.route('/bokeh')
def my_form_post2():
return render_template("bokeh-results.html")
results_sql = '''SELECT "Population" as "Population"
,"Year"
,deaths_hhp_non_mil as "Deaths"
,mig_in - mig_out as "Net Migration"
,new_born as "Births"
FROM defm.population_summary
WHERE "Run_id" =''' + str(run_id) + ''' and "Year" >2010 ORDER BY "Year" '''
results_df = pd.read_sql(results_sql, defm_engine, index_col='Year')
feature_names = results_df.columns[0:].values.tolist()
@app.route('/bokeh/pop')
def my_form_post_pop():
# Determine the selected feature
current_feature_name = request.args.get("feature_name")
if current_feature_name is None:
current_feature_name = "Population"
listx= results_df.index.values.tolist()
new_list = []
for item in listx:
new_list.append(str(item))
listy= results_df[current_feature_name].tolist()
new_list2 = []
for item in listy:
new_list2.append(int(item))
chart = {"renderTo": 'chart_ID', "type": 'line', "height": 450}
series = [{"name": str(current_feature_name), "data": new_list2}]
title = {"text": str(current_feature_name) + ' Trends'}
xAxis = {"title": {"text": 'Year'}, "categories": new_list}
yAxis = {"title": {"text": 'Persons'}}
# render template
html = render_template(
'result-form.html',
feature_names=feature_names,
current_feature_name=current_feature_name,
chartID='chart_ID', chart=chart, series=series, title=title, xAxis=xAxis, yAxis=yAxis
)
return html
race_sql = '''SELECT yr as "Year",
SUM(CASE WHEN race_ethn = 'B' THEN persons ELSE 0 END) as "Black (NH)",
SUM(CASE WHEN race_ethn = 'H' THEN persons ELSE 0 END) as "Hispanic",
SUM(CASE WHEN race_ethn = 'S' THEN persons ELSE 0 END) as "Asian (NH)",
SUM(CASE WHEN race_ethn = 'W' THEN persons ELSE 0 END) as "White (NH)",
SUM(CASE WHEN race_ethn = 'O' THEN persons ELSE 0 END) as "Other (NH)"
FROM defm.population
WHERE run_id=''' + str(run_id) + ''' and yr >2010 GROUP BY yr ORDER BY yr'''
race_df = pd.read_sql(race_sql, defm_engine, index_col='Year')
race_cat1 = race_df.columns[0:].values.tolist()
@app.route('/bokeh/pop_race')
def my_form_post_pop_by_race():
# Determine the selected feature
current_race_list = request.args.getlist("race_list1")
if len(current_race_list) == 0:
current_race_list = race_cat1
listx = race_df.index.values.tolist()
new_list = []
for item in listx:
new_list.append(str(item))
series = []
for x in current_race_list:
listy = race_df[str(x)].tolist()
new_list2 = []
for item in listy:
new_list2.append(float(item))
series.append({"name": str(x), "data": new_list2})
chart = {"renderTo": 'chart_ID', "type": 'area', "height": 600, "width": 1000}
title = {"text": 'Population by Race Trends'}
xAxis = {"title": {"text": 'Year'}, "categories": new_list}
yAxis = {"title": {"text": 'Count / Persons'}}
plotOptions = {"area": {"stacking": 'percent'}, "lineColor": '#ffffff',
"lineWidth": '1',
"marker": {
"lineWidth": '1',
"lineColor": '#ffffff'
}
}
# render template
html = render_template(
'result-form-pop-race.html',
race_list=race_cat1,
current_race_list=current_race_list,
chartID='chart_ID', chart=chart, series=series, title=title, xAxis=xAxis, yAxis=yAxis, plotOptions=plotOptions
)
return html
econ_sql = '''SELECT yr as "Year",
labor_force,
unemployed,
work_force,
work_force_outside,
work_force_local,
jobs_local,
jobs_total,
jobs_external,
avg_wage,
jobs_total_wages,
jobs_local_wages,
jobs_external_wages,
wf_outside_wages,
military_income,
unearned_income,
"Selfemp_Income",
personal_income,
taxable_retail_sales
FROM defm.emp_summary WHERE run_id = ''' + str(run_id) + ''' ORDER BY yr'''
econ_df = pd.read_sql(econ_sql, defm_engine, index_col='Year')
econ_cat = econ_df.columns[0:].values.tolist()
@app.route('/bokeh/econ')
def my_form_post_econ():
# Determine the selected feature
current_feature_name = request.args.get("feature_name")
if current_feature_name is None:
current_feature_name = "personal_income"
listx = econ_df.index.values.tolist()
new_list = []
for item in listx:
new_list.append(str(item))
listy = econ_df[current_feature_name].tolist()
new_list2 = []
for item in listy:
new_list2.append(float(item))
chart = {"renderTo": 'chart_ID', "type": 'line', "height": 450}
series = [{"name": str(current_feature_name), "data": new_list2}]
title = {"text": str(current_feature_name) + ' Trends'}
xAxis = {"title": {"text": 'Year'}, "categories": new_list}
yAxis = {"title": {"text": 'Count / Persons'}}
# render template
html = render_template(
'result-form-econ.html',
feature_names=econ_cat,
current_feature_name=current_feature_name,
chartID='chart_ID', chart=chart, series=series, title=title, xAxis=xAxis, yAxis=yAxis
)
return html
birth_sql = '''SELECT [birth_rate_id] as rate_id
,[yr] as "Year"
,CASE
WHEN race = 'B' THEN 'Black (NH)'
WHEN race = 'H' THEN 'Hispanic'
WHEN race = 'S' THEN 'Asian (NH)'
WHEN race = 'W' THEN 'White (NH)'
WHEN race = 'O' THEN 'Other (NH)' ELSE 'None' END as race
,sum([birth_rate]) as fertility_rates
FROM [isam].[demographic_rates].[birth_rates]
GROUP BY yr, race, birth_rate_id
ORDER BY race, yr '''
birth_df = pd.read_sql(birth_sql, sql_in_engine, index_col=None)
rate_id_cat = birth_df.rate_id.unique()
race_cat = birth_df.race.unique()
@app.route('/birth_rates')
def my_form_post_brith_rates():
# Determine the selected feature
current_rate_id = request.args.get("rate")
current_race_list = request.args.getlist("race_list1")
if current_rate_id is None:
current_rate_id = 101
if len(current_race_list) == 0:
current_race_list = race_cat
listx = birth_df.Year.unique()
new_list = []
for item in listx:
new_list.append(str(item))
series = []
for x in current_race_list:
df = birth_df.loc[(birth_df.rate_id == int(current_rate_id)) & (birth_df.race == str(x))]
listy = df['fertility_rates'].tolist()
new_list2 = []
for item in listy:
new_list2.append(float(item))
series.append({"name": str(x), "data": new_list2})
chart = {"renderTo": 'chart_ID', "type": 'line', "height": 600, "width": 1000}
title = {"text": 'Fertility Rates by Race'}
xAxis = {"title": {"text": 'Year'}, "categories": new_list}
yAxis = {"title": {"text": 'Fertility Rates'}}
# render template
html = render_template(
'birth-rates.html',
rate_id_list=rate_id_cat,
current_rate_id=current_rate_id,
race_list=race_cat,
current_race_list=current_race_list,
chartID='chart_ID', chart=chart, series=series, title=title, xAxis=xAxis, yAxis=yAxis
)
return html
death_sql = '''SELECT [death_rate_id] as rate_id
,[yr] as "Year"
,[age]
,CASE
WHEN [race]+[sex] = 'BF' THEN 'Black (NH) - Female'
WHEN [race]+[sex] = 'BM' THEN 'Black (NH) - Male'
WHEN [race]+[sex]= 'HF' THEN 'Hispanic - Female'
WHEN [race]+[sex]= 'HM' THEN 'Hispanic - Male'
WHEN [race]+[sex] = 'SF' THEN 'Asian (NH) - Female'
WHEN [race]+[sex] = 'SM' THEN 'Asian (NH) - Male'
WHEN [race]+[sex] = 'WF' THEN 'White (NH) - Female'
WHEN [race]+[sex] = 'WM' THEN 'White (NH) - Male'
WHEN [race]+[sex] = 'OF' THEN 'Other (NH) - Female'
WHEN [race]+[sex] = 'OM' THEN 'Other (NH) - Male' ELSE 'None' END as race
,[death_rate]
FROM [isam].[demographic_rates].[death_rates]
WHERE age < 100
ORDER BY age, yr'''
death_df = pd.read_sql(death_sql, sql_in_engine, index_col=None)
death_rate_id_cat = death_df.rate_id.unique()
death_year_cat = death_df.Year.unique()
death_race_cat = death_df.race.unique()
@app.route('/death_rates')
def my_form_post_death_rates():
# Determine the selected feature
current_rate_id = request.args.get("rate")
current_year_id = request.args.get("year")
current_race_list = request.args.getlist("race_list1")
if current_rate_id is None:
current_rate_id = 101
if current_year_id is None:
current_year_id = death_year_cat.min()
if len(current_race_list) == 0:
current_race_list = death_race_cat
listx = death_df.Year.unique()
new_list = []
for item in listx:
new_list.append(str(item))
series = []
for x in current_race_list:
df = death_df.loc[(death_df.rate_id == int(current_rate_id)) & (death_df.race == x) &
(death_df.Year == int(current_year_id))]
listy = df['death_rate'].tolist()
new_list2 = []
for item in listy:
new_list2.append(float(item))
series.append({"name": str(x), "data": new_list2})
chart = {"renderTo": 'chart_ID', "type": 'line', "height": 600, "width": 1000}
title = {"text": 'Death Rates by Race'}
xAxis = {"title": {"text": 'Year'}, "categories": new_list}
yAxis = {"title": {"text": 'Death Rates'}}
# render template
html = render_template(
'death-rates.html',
year_list=death_year_cat,
current_year_id=current_year_id,
rate_id_list=rate_id_cat,
current_rate_id=current_rate_id,
race_list=death_race_cat,
current_race_list=current_race_list,
chartID='chart_ID', chart=chart, series=series, title=title, xAxis=xAxis, yAxis=yAxis
)
return html
if __name__ == '__main__':
shutil.rmtree('temp')
os.makedirs('temp')
app.run()
| apache-2.0 |
Paul-St-Young/solid_hydrogen | qharv_db/meta.py | 1 | 5031 | import os
import numpy as np
import pandas as pd
from qharv.reel import mole
# ======================== level 0: paths and inputs =========================
def get_pcdirs(
tmp_dir='inp_text',
pd_name='proj_dir.dat',
fdl_name='folder_list.dat'
):
""" read project dir and calc dirs from inputs
default to the following directory structure:
$ls -R
./inp_text:
folder_list.dat proj_dir.dat
proj_dir.dat should hold the absolute path to project folder
folder_list.dat should hold a list of QMC calculation directories
Args:
tmp_dir (str, optional): temporary folder to hold inputs and meta data
pd_name (str, optional): file holding project directory
fdl_name (str, optional): file holding calculation folder list
Return:
tuple: (str, list) i.e. (proj_dir, folder_list)
"""
# locate project directory
pd_dat = os.path.join(tmp_dir, pd_name)
with open(pd_dat, 'r') as f:
proj_dir = f.read().strip('\n')
# locate folders to analyze
fdlist_dat = os.path.join(tmp_dir, fdl_name)
with open(fdlist_dat,'r') as f:
folder_list = f.read().split('\n')[:-1]
return proj_dir, folder_list
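# Illustrative call (paths are hypothetical): with inp_text/proj_dir.dat
# holding "/abs/path/to/project" and inp_text/folder_list.dat listing one
# calculation folder per line, get_pcdirs() returns
# ("/abs/path/to/project", ["runA", "runB", ...]).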
def sra_label(subdir, sep='-'):
tokens = subdir.split(sep)
sname = tokens[0]
rst = tokens[1]
cat = tokens[2]
rs = float(rst.replace('rs', ''))
ca = float(cat.replace('ca', ''))
return {'sname':sname, 'rs':rs, 'ca':ca}
def get_task_dir(subdir):
sra = sra_label(subdir)
sname = sra['sname']
rs = sra['rs']
task_dir_map = {
'c2c': '56-c2c-dft-geo/ecut50-k8',
'cmca4': '57-cmca4-dft-geo/ecut50-k8',
'cmca12': '58-cmca12-dft-geo/ecut50-k8',
'i41amd': '44-i4-twist/ecut50-k8'
}
if rs < 1.21:
task_dir_map = {
'c2c': '79-c2c-dft-geo/ecut50-k8',
'cmca4': '78-cmca4-dft-geo/ecut50-k8',
'i41amd': '77-i4-twist/ecut50-k8',
'cmca12': None
}
if (sname == 'cmca4') & (rs < 1.19):
task_dir_map['cmca4'] = '86-cmca4-kgrid12-geo/ecut50-k8'
task_dir = task_dir_map[sname]
return task_dir
def collect_first_input(folderl):
""" collect input text into a database
example:
proj_dir, fdl = get_pcdirs()
folderl = [os.path.join(proj_dir, folder) for folder in fdl]
mdf = collect_first_input(folderl)
Args:
folderl (list): a list of QMC calculation folders
Return:
pd.DataFrame: mdf contains ['path', 'inp_text'] columns. mdf collects the
first input xml mole finds in each folder.
"""
data = []
for folder in folderl:
fin = mole.files_with_regex('*.in.xml',folder)[0]
with open(fin,'r') as f:
inp_text = f.read()
# end with
entry = {'path':folder,'inp_text':inp_text}
data.append(entry)
# end for
mdf = pd.DataFrame(data)
return mdf
def find_all_groups_and_series(folderl):
gsdata = []
for folder in folderl:
flist = mole.files_with_regex('*scalar.dat', folder)
for floc in flist:
fdat = os.path.basename(floc)
meta = mole.interpret_qmcpack_fname(fdat)
meta['path'] = folder
meta['fdat'] = fdat
gsdata.append(meta)
# end for
# end for
gsdf = pd.DataFrame(gsdata).sort_values('group')
return gsdf
def get_prefix_from_path(path, proj_dir, sep='_'):
task_dir = path.replace(proj_dir, '')
prefix = sep.join([seg for seg in task_dir.split('/')
if seg not in ['.', '..']]).strip(sep)
return prefix
def meta_from_path(path0):
ipbe = path0.find('pbe')
if ipbe < 0:
raise RuntimeError('unknown path %s' % path0)
path = path0[ipbe:]
tokens = path.split('/')
ttrst = tokens[1]
ntict = tokens[2]
tt, rst = ttrst.split('-')
nt, ict = ntict.split('-')
temp = int(tt.replace('t', ''))
rs = float(rst.replace('rs', ''))
natom = int(nt.replace('h', ''))
iconf = int(ict.replace('i', ''))
entry = {'temp': temp, 'rs': rs, 'natom': natom, 'iconf': iconf}
return entry
# ====================== level 1: parse input =======================
def get_axes_pos(doc):
from qharv.seed import xml
axes = xml.get_axes(doc)
pos = xml.get_pos(doc)
entry = {'axes':axes.tolist(), 'pos':pos.tolist()}
return entry
def get_density(doc):
from qharv.inspect import axes_pos
entry = get_axes_pos(doc)
volume = axes_pos.volume(entry['axes'])
natom = len(entry['pos'])
rho = natom/volume
rs = (3./(4*np.pi*rho))**(1./3)
return {'rs':rs, 'volume':volume, 'rho':rho}
# =================== level 2: read QMC database ====================
def get_force_columns(cols, name='force', idx_dim=-1):
fcols = [c.replace('_mean', '') for c in cols
if c.startswith(name) and c != '%s_mean' % name
and c.endswith('_mean')]
# sort columns
def iatom_idim(c):
tokens = c.split('_')
iatom = int(tokens[idx_dim-1])
idim = int(tokens[idx_dim])
return {'iatom': iatom, 'idim': idim}
fdf = pd.DataFrame([iatom_idim(c) for c in fcols], dtype=int)
fdf.iatom = fdf.iatom.astype(int)
fdf.idim = fdf.idim.astype(int)
fdf['force'] = fcols
fcols = fdf.sort_values(['iatom', 'idim']).force.values.tolist()
return fcols
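# Illustrative call (column names are hypothetical, not taken from a real run):
# get_force_columns(['force_1_1_mean', 'force_0_1_mean', 'force_0_0_mean'])
# returns ['force_0_0', 'force_0_1', 'force_1_1'], i.e. sorted by atom index
# and then by spatial dimension.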
| mit |
ManuSchmi88/landlab | landlab/plot/graph.py | 2 | 1973 | import numpy as np
import matplotlib.pyplot as plt
def plot_nodes(graph, color='r'):
for node in range(len(graph.x_of_node)):
x, y = graph.x_of_node[node], graph.y_of_node[node]
plt.plot(graph.x_of_node[node], graph.y_of_node[node], 'o',
color=color)
plt.text(x, y, node, color=color, size=16)
plt.xlabel('x')
plt.ylabel('y')
plt.gca().set_aspect(1.)
def plot_links(graph, color='b', linestyle='solid'):
for link, nodes in enumerate(graph.nodes_at_link):
x, y = graph.x_of_node[nodes[0]], graph.y_of_node[nodes[0]]
dx, dy = graph.x_of_node[nodes[1]] - x, graph.y_of_node[nodes[1]] - y
plt.arrow(x, y, dx, dy, head_width=.1, length_includes_head=True,
color=color, linestyle=linestyle)
plt.text(x + dx * .5, y + dy * .5, link, size=16, color=color)
plt.xlabel('x')
plt.ylabel('y')
plt.gca().set_aspect(1.)
def plot_patches(graph, color='g'):
for patch, nodes in enumerate(graph.nodes_at_patch):
x, y = np.mean(graph.x_of_node[nodes]), np.mean(graph.y_of_node[nodes])
plt.text(x, y, patch, color=color, size=16)
plt.xlabel('x')
plt.ylabel('y')
plt.gca().set_aspect(1.)
def plot_graph(graph, at='node,link,patch'):
locs = [loc.strip() for loc in at.split(',')]
for loc in locs:
if loc not in ('node', 'link', 'patch', 'corner', 'face', 'cell'):
raise ValueError(
'{at}: "at" element not understood'.format(at=loc))
plt.plot(graph.x_of_node, graph.y_of_node, '.', color='r')
if 'node' in locs:
plot_nodes(graph)
if 'link' in locs:
plot_links(graph)
if 'patch' in locs:
plot_patches(graph)
if 'corner' in locs:
plot_nodes(graph.dual, color='c')
if 'face' in locs:
plot_links(graph.dual, linestyle='dotted', color='k')
if 'cell' in locs:
plot_patches(graph.dual, color='m')
plt.show()
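# Usage sketch (assumes `graph` provides x_of_node, y_of_node, nodes_at_link,
# nodes_at_patch and, for corner/face/cell plotting, a `dual` attribute, as
# the helpers above require):
# plot_graph(graph, at='node,link,patch')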
| mit |
sumanau7/Ele_CC_Sumanau | lib/IPython/core/interactiveshell.py | 6 | 132557 | # -*- coding: utf-8 -*-
"""Main IPython class."""
#-----------------------------------------------------------------------------
# Copyright (C) 2001 Janko Hauser <[email protected]>
# Copyright (C) 2001-2007 Fernando Perez. <[email protected]>
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import __future__
import abc
import ast
import atexit
import functools
import os
import re
import runpy
import sys
import tempfile
import traceback
import types
import subprocess
import warnings
from io import open as io_open
from pickleshare import PickleShareDB
from traitlets.config.configurable import SingletonConfigurable
from IPython.core import debugger, oinspect
from IPython.core import magic
from IPython.core import page
from IPython.core import prefilter
from IPython.core import shadowns
from IPython.core import ultratb
from IPython.core.alias import Alias, AliasManager
from IPython.core.autocall import ExitAutocall
from IPython.core.builtin_trap import BuiltinTrap
from IPython.core.events import EventManager, available_events
from IPython.core.compilerop import CachingCompiler, check_linecache_ipython
from IPython.core.display_trap import DisplayTrap
from IPython.core.displayhook import DisplayHook
from IPython.core.displaypub import DisplayPublisher
from IPython.core.error import InputRejected, UsageError
from IPython.core.extensions import ExtensionManager
from IPython.core.formatters import DisplayFormatter
from IPython.core.history import HistoryManager
from IPython.core.inputsplitter import IPythonInputSplitter, ESC_MAGIC, ESC_MAGIC2
from IPython.core.logger import Logger
from IPython.core.macro import Macro
from IPython.core.payload import PayloadManager
from IPython.core.prefilter import PrefilterManager
from IPython.core.profiledir import ProfileDir
from IPython.core.prompts import PromptManager
from IPython.core.usage import default_banner
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils import PyColorize
from IPython.utils import io
from IPython.utils import py3compat
from IPython.utils import openpy
from IPython.utils.contexts import NoOpContext
from IPython.utils.decorators import undoc
from IPython.utils.io import ask_yes_no
from IPython.utils.ipstruct import Struct
from IPython.paths import get_ipython_dir
from IPython.utils.path import get_home_dir, get_py_filename, unquote_filename, ensure_dir_exists
from IPython.utils.process import system, getoutput
from IPython.utils.py3compat import (builtin_mod, unicode_type, string_types,
with_metaclass, iteritems)
from IPython.utils.strdispatch import StrDispatch
from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.text import (format_screen, LSString, SList,
DollarFormatter)
from traitlets import (Integer, Bool, CBool, CaselessStrEnum, Enum,
List, Dict, Unicode, Instance, Type)
from IPython.utils.warn import warn, error
import IPython.core.hooks
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# compiled regexps for autoindent management
dedent_re = re.compile(r'^\s+raise|^\s+return|^\s+pass')
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
@undoc
def softspace(file, newvalue):
"""Copied from code.py, to remove the dependency"""
oldvalue = 0
try:
oldvalue = file.softspace
except AttributeError:
pass
try:
file.softspace = newvalue
except (AttributeError, TypeError):
# "attribute-less object" or "read-only attributes"
pass
return oldvalue
@undoc
def no_op(*a, **kw): pass
class SpaceInInput(Exception): pass
@undoc
class Bunch: pass
def get_default_colors():
if sys.platform=='darwin':
return "LightBG"
elif os.name=='nt':
return 'Linux'
else:
return 'Linux'
class SeparateUnicode(Unicode):
r"""A Unicode subclass to validate separate_in, separate_out, etc.
This is a Unicode based trait that converts '0'->'' and ``'\\n'->'\n'``.
"""
def validate(self, obj, value):
if value == '0': value = ''
value = value.replace('\\n','\n')
return super(SeparateUnicode, self).validate(obj, value)
@undoc
class DummyMod(object):
"""A dummy module used for IPython's interactive module when
a namespace must be assigned to the module's __dict__."""
pass
class ExecutionResult(object):
"""The result of a call to :meth:`InteractiveShell.run_cell`
Stores information about what took place.
"""
execution_count = None
error_before_exec = None
error_in_exec = None
result = None
@property
def success(self):
return (self.error_before_exec is None) and (self.error_in_exec is None)
def raise_error(self):
"""Reraises error if `success` is `False`, otherwise does nothing"""
if self.error_before_exec is not None:
raise self.error_before_exec
if self.error_in_exec is not None:
raise self.error_in_exec
class InteractiveShell(SingletonConfigurable):
"""An enhanced, interactive shell for Python."""
_instance = None
ast_transformers = List([], config=True, help=
"""
A list of ast.NodeTransformer subclass instances, which will be applied
to user input before code is run.
"""
)
autocall = Enum((0,1,2), default_value=0, config=True, help=
"""
Make IPython automatically call any callable object even if you didn't
type explicit parentheses. For example, 'str 43' becomes 'str(43)'
automatically. The value can be '0' to disable the feature, '1' for
'smart' autocall, where it is not applied if there are no more
arguments on the line, and '2' for 'full' autocall, where all callable
objects are automatically called (even if no arguments are present).
"""
)
# TODO: remove all autoindent logic and put into frontends.
# We can't do this yet because even runlines uses the autoindent.
autoindent = CBool(True, config=True, help=
"""
Autoindent IPython code entered interactively.
"""
)
automagic = CBool(True, config=True, help=
"""
Enable magic commands to be called without the leading %.
"""
)
banner1 = Unicode(default_banner, config=True,
help="""The part of the banner to be printed before the profile"""
)
banner2 = Unicode('', config=True,
help="""The part of the banner to be printed after the profile"""
)
cache_size = Integer(1000, config=True, help=
"""
Set the size of the output cache. The default is 1000, you can
change it permanently in your config file. Setting it to 0 completely
disables the caching system, and the minimum value accepted is 20 (if
you provide a value less than 20, it is reset to 0 and a warning is
issued). This limit is defined because otherwise you'll spend more
time re-flushing a too small cache than working
"""
)
color_info = CBool(True, config=True, help=
"""
Use colors for displaying information about objects. Because this
information is passed through a pager (like 'less'), and some pagers
get confused with color codes, this capability can be turned off.
"""
)
colors = CaselessStrEnum(('NoColor','LightBG','Linux'),
default_value=get_default_colors(), config=True,
help="Set the color scheme (NoColor, Linux, or LightBG)."
)
colors_force = CBool(False, help=
"""
Force use of ANSI color codes, regardless of OS and readline
availability.
"""
# FIXME: This is essentially a hack to allow ZMQShell to show colors
# without readline on Win32. When the ZMQ formatting system is
# refactored, this should be removed.
)
debug = CBool(False, config=True)
deep_reload = CBool(False, config=True, help=
"""
**Deprecated**
Will be removed in IPython 6.0
Enable deep (recursive) reloading by default. IPython can use the
deep_reload module which reloads changes in modules recursively (it
replaces the reload() function, so you don't need to change anything to
use it). `deep_reload` forces a full reload of modules whose code may
have changed, which the default reload() function does not. When
deep_reload is off, IPython will use the normal reload(), but
deep_reload will still be available as dreload().
"""
)
disable_failing_post_execute = CBool(False, config=True,
help="Don't call post-execute functions that have failed in the past."
)
display_formatter = Instance(DisplayFormatter, allow_none=True)
displayhook_class = Type(DisplayHook)
display_pub_class = Type(DisplayPublisher)
data_pub_class = None
exit_now = CBool(False)
exiter = Instance(ExitAutocall)
def _exiter_default(self):
return ExitAutocall(self)
# Monotonically increasing execution counter
execution_count = Integer(1)
filename = Unicode("<ipython console>")
ipython_dir= Unicode('', config=True) # Set to get_ipython_dir() in __init__
# Input splitter, to transform input line by line and detect when a block
# is ready to be executed.
input_splitter = Instance('IPython.core.inputsplitter.IPythonInputSplitter',
(), {'line_input_checker': True})
# This InputSplitter instance is used to transform completed cells before
# running them. It allows cell magics to contain blank lines.
input_transformer_manager = Instance('IPython.core.inputsplitter.IPythonInputSplitter',
(), {'line_input_checker': False})
logstart = CBool(False, config=True, help=
"""
Start logging to the default log file in overwrite mode.
Use `logappend` to specify a log file to **append** logs to.
"""
)
logfile = Unicode('', config=True, help=
"""
The name of the logfile to use.
"""
)
logappend = Unicode('', config=True, help=
"""
Start logging to the given file in append mode.
Use `logfile` to specify a log file to **overwrite** logs to.
"""
)
object_info_string_level = Enum((0,1,2), default_value=0,
config=True)
pdb = CBool(False, config=True, help=
"""
Automatically call the pdb debugger after every exception.
"""
)
multiline_history = CBool(sys.platform != 'win32', config=True,
help="Save multi-line entries as one entry in readline history"
)
display_page = Bool(False, config=True,
help="""If True, anything that would be passed to the pager
will be displayed as regular output instead."""
)
# deprecated prompt traits:
prompt_in1 = Unicode('In [\\#]: ', config=True,
help="Deprecated, will be removed in IPython 5.0, use PromptManager.in_template")
prompt_in2 = Unicode(' .\\D.: ', config=True,
help="Deprecated, will be removed in IPython 5.0, use PromptManager.in2_template")
prompt_out = Unicode('Out[\\#]: ', config=True,
help="Deprecated, will be removed in IPython 5.0, use PromptManager.out_template")
prompts_pad_left = CBool(True, config=True,
help="Deprecated, will be removed in IPython 5.0, use PromptManager.justify")
def _prompt_trait_changed(self, name, old, new):
table = {
'prompt_in1' : 'in_template',
'prompt_in2' : 'in2_template',
'prompt_out' : 'out_template',
'prompts_pad_left' : 'justify',
}
warn("InteractiveShell.{name} is deprecated, use PromptManager.{newname}".format(
name=name, newname=table[name])
)
# protect against weird cases where self.config may not exist:
if self.config is not None:
# propagate to corresponding PromptManager trait
setattr(self.config.PromptManager, table[name], new)
_prompt_in1_changed = _prompt_trait_changed
_prompt_in2_changed = _prompt_trait_changed
_prompt_out_changed = _prompt_trait_changed
_prompt_pad_left_changed = _prompt_trait_changed
show_rewritten_input = CBool(True, config=True,
help="Show rewritten input, e.g. for autocall."
)
quiet = CBool(False, config=True)
history_length = Integer(10000, config=True)
history_load_length = Integer(1000, config=True, help=
"""
The number of saved history entries to be loaded
into the readline buffer at startup.
"""
)
# The readline stuff will eventually be moved to the terminal subclass
# but for now, we can't do that as readline is welded in everywhere.
readline_use = CBool(True, config=True)
readline_remove_delims = Unicode('-/~', config=True)
readline_delims = Unicode() # set by init_readline()
# don't use \M- bindings by default, because they
# conflict with 8-bit encodings. See gh-58,gh-88
readline_parse_and_bind = List([
'tab: complete',
'"\C-l": clear-screen',
'set show-all-if-ambiguous on',
'"\C-o": tab-insert',
'"\C-r": reverse-search-history',
'"\C-s": forward-search-history',
'"\C-p": history-search-backward',
'"\C-n": history-search-forward',
'"\e[A": history-search-backward',
'"\e[B": history-search-forward',
'"\C-k": kill-line',
'"\C-u": unix-line-discard',
], config=True)
_custom_readline_config = False
def _readline_parse_and_bind_changed(self, name, old, new):
# notice that readline config is customized
# indicates that it should have higher priority than inputrc
self._custom_readline_config = True
ast_node_interactivity = Enum(['all', 'last', 'last_expr', 'none'],
default_value='last_expr', config=True,
help="""
'all', 'last', 'last_expr' or 'none', specifying which nodes should be
run interactively (displaying output from expressions).""")
# TODO: this part of prompt management should be moved to the frontends.
# Use custom TraitTypes that convert '0'->'' and '\\n'->'\n'
separate_in = SeparateUnicode('\n', config=True)
separate_out = SeparateUnicode('', config=True)
separate_out2 = SeparateUnicode('', config=True)
wildcards_case_sensitive = CBool(True, config=True)
xmode = CaselessStrEnum(('Context','Plain', 'Verbose'),
default_value='Context', config=True)
# Subcomponents of InteractiveShell
alias_manager = Instance('IPython.core.alias.AliasManager', allow_none=True)
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
builtin_trap = Instance('IPython.core.builtin_trap.BuiltinTrap', allow_none=True)
display_trap = Instance('IPython.core.display_trap.DisplayTrap', allow_none=True)
extension_manager = Instance('IPython.core.extensions.ExtensionManager', allow_none=True)
payload_manager = Instance('IPython.core.payload.PayloadManager', allow_none=True)
history_manager = Instance('IPython.core.history.HistoryAccessorBase', allow_none=True)
magics_manager = Instance('IPython.core.magic.MagicsManager', allow_none=True)
profile_dir = Instance('IPython.core.application.ProfileDir', allow_none=True)
@property
def profile(self):
if self.profile_dir is not None:
name = os.path.basename(self.profile_dir.location)
return name.replace('profile_','')
# Private interface
_post_execute = Dict()
# Tracks any GUI loop loaded for pylab
pylab_gui_select = None
def __init__(self, ipython_dir=None, profile_dir=None,
user_module=None, user_ns=None,
custom_exceptions=((), None), **kwargs):
# This is where traits with a config_key argument are updated
# from the values on config.
super(InteractiveShell, self).__init__(**kwargs)
self.configurables = [self]
# These are relatively independent and stateless
self.init_ipython_dir(ipython_dir)
self.init_profile_dir(profile_dir)
self.init_instance_attrs()
self.init_environment()
# Check if we're in a virtualenv, and set up sys.path.
self.init_virtualenv()
# Create namespaces (user_ns, user_global_ns, etc.)
self.init_create_namespaces(user_module, user_ns)
# This has to be done after init_create_namespaces because it uses
# something in self.user_ns, but before init_sys_modules, which
# is the first thing to modify sys.
# TODO: When we override sys.stdout and sys.stderr before this class
# is created, we are saving the overridden ones here. Not sure if this
# is what we want to do.
self.save_sys_module_state()
self.init_sys_modules()
# While we're trying to have each part of the code directly access what
# it needs without keeping redundant references to objects, we have too
# much legacy code that expects ip.db to exist.
self.db = PickleShareDB(os.path.join(self.profile_dir.location, 'db'))
self.init_history()
self.init_encoding()
self.init_prefilter()
self.init_syntax_highlighting()
self.init_hooks()
self.init_events()
self.init_pushd_popd_magic()
# self.init_traceback_handlers use to be here, but we moved it below
# because it and init_io have to come after init_readline.
self.init_user_ns()
self.init_logger()
self.init_builtins()
# The following was in post_config_initialization
self.init_inspector()
# init_readline() must come before init_io(), because init_io uses
# readline related things.
self.init_readline()
# We save this here in case user code replaces raw_input, but it needs
# to be after init_readline(), because PyPy's readline works by replacing
# raw_input.
if py3compat.PY3:
self.raw_input_original = input
else:
self.raw_input_original = raw_input
# init_completer must come after init_readline, because it needs to
# know whether readline is present or not system-wide to configure the
# completers, since the completion machinery can now operate
# independently of readline (e.g. over the network)
self.init_completer()
# TODO: init_io() needs to happen before init_traceback handlers
# because the traceback handlers hardcode the stdout/stderr streams.
# This logic in in debugger.Pdb and should eventually be changed.
self.init_io()
self.init_traceback_handlers(custom_exceptions)
self.init_prompts()
self.init_display_formatter()
self.init_display_pub()
self.init_data_pub()
self.init_displayhook()
self.init_magics()
self.init_alias()
self.init_logstart()
self.init_pdb()
self.init_extension_manager()
self.init_payload()
self.init_deprecation_warnings()
self.hooks.late_startup_hook()
self.events.trigger('shell_initialized', self)
atexit.register(self.atexit_operations)
def get_ipython(self):
"""Return the currently running IPython instance."""
return self
#-------------------------------------------------------------------------
# Trait changed handlers
#-------------------------------------------------------------------------
def _ipython_dir_changed(self, name, new):
ensure_dir_exists(new)
def set_autoindent(self,value=None):
"""Set the autoindent flag, checking for readline support.
If called with no arguments, it acts as a toggle."""
if value != 0 and not self.has_readline:
if os.name == 'posix':
warn("The auto-indent feature requires the readline library")
self.autoindent = 0
return
if value is None:
self.autoindent = not self.autoindent
else:
self.autoindent = value
#-------------------------------------------------------------------------
# init_* methods called by __init__
#-------------------------------------------------------------------------
def init_ipython_dir(self, ipython_dir):
if ipython_dir is not None:
self.ipython_dir = ipython_dir
return
self.ipython_dir = get_ipython_dir()
def init_profile_dir(self, profile_dir):
if profile_dir is not None:
self.profile_dir = profile_dir
return
self.profile_dir =\
ProfileDir.create_profile_dir_by_name(self.ipython_dir, 'default')
def init_instance_attrs(self):
self.more = False
# command compiler
self.compile = CachingCompiler()
# Make an empty namespace, which extension writers can rely on both
# existing and NEVER being used by ipython itself. This gives them a
# convenient location for storing additional information and state
# their extensions may require, without fear of collisions with other
# ipython names that may develop later.
self.meta = Struct()
# Temporary files used for various purposes. Deleted at exit.
self.tempfiles = []
self.tempdirs = []
# Keep track of readline usage (later set by init_readline)
self.has_readline = False
# keep track of where we started running (mainly for crash post-mortem)
# This is not being used anywhere currently.
self.starting_dir = py3compat.getcwd()
# Indentation management
self.indent_current_nsp = 0
# Dict to track post-execution functions that have been registered
self._post_execute = {}
def init_environment(self):
"""Any changes we need to make to the user's environment."""
pass
def init_encoding(self):
# Get system encoding at startup time. Certain terminals (like Emacs
# under Win32) have it set to None, and we need to have a known valid
# encoding to use in the raw_input() method
try:
self.stdin_encoding = sys.stdin.encoding or 'ascii'
except AttributeError:
self.stdin_encoding = 'ascii'
def init_syntax_highlighting(self):
# Python source parser/formatter for syntax highlighting
pyformat = PyColorize.Parser().format
self.pycolorize = lambda src: pyformat(src,'str',self.colors)
def init_pushd_popd_magic(self):
# for pushd/popd management
self.home_dir = get_home_dir()
self.dir_stack = []
def init_logger(self):
self.logger = Logger(self.home_dir, logfname='ipython_log.py',
logmode='rotate')
def init_logstart(self):
"""Initialize logging in case it was requested at the command line.
"""
if self.logappend:
self.magic('logstart %s append' % self.logappend)
elif self.logfile:
self.magic('logstart %s' % self.logfile)
elif self.logstart:
self.magic('logstart')
def init_deprecation_warnings(self):
"""
register default filter for deprecation warning.
This will allow deprecation warning of function used interactively to show
warning to users, and still hide deprecation warning from libraries import.
"""
warnings.filterwarnings("default", category=DeprecationWarning, module=self.user_ns.get("__name__"))
def init_builtins(self):
# A single, static flag that we set to True. Its presence indicates
# that an IPython shell has been created, and we make no attempts at
# removing on exit or representing the existence of more than one
# IPython at a time.
builtin_mod.__dict__['__IPYTHON__'] = True
# In 0.11 we introduced '__IPYTHON__active' as an integer we'd try to
# manage on enter/exit, but with all our shells it's virtually
# impossible to get all the cases right. We're leaving the name in for
# those who adapted their codes to check for this flag, but will
# eventually remove it after a few more releases.
builtin_mod.__dict__['__IPYTHON__active'] = \
'Deprecated, check for __IPYTHON__'
self.builtin_trap = BuiltinTrap(shell=self)
def init_inspector(self):
# Object inspector
self.inspector = oinspect.Inspector(oinspect.InspectColors,
PyColorize.ANSICodeColors,
'NoColor',
self.object_info_string_level)
def init_io(self):
# This will just use sys.stdout and sys.stderr. If you want to
# override sys.stdout and sys.stderr themselves, you need to do that
# *before* instantiating this class, because io holds onto
# references to the underlying streams.
if (sys.platform == 'win32' or sys.platform == 'cli') and self.has_readline:
io.stdout = io.stderr = io.IOStream(self.readline._outputfile)
else:
io.stdout = io.IOStream(sys.stdout)
io.stderr = io.IOStream(sys.stderr)
def init_prompts(self):
self.prompt_manager = PromptManager(shell=self, parent=self)
self.configurables.append(self.prompt_manager)
# Set system prompts, so that scripts can decide if they are running
# interactively.
sys.ps1 = 'In : '
sys.ps2 = '...: '
sys.ps3 = 'Out: '
def init_display_formatter(self):
self.display_formatter = DisplayFormatter(parent=self)
self.configurables.append(self.display_formatter)
def init_display_pub(self):
self.display_pub = self.display_pub_class(parent=self)
self.configurables.append(self.display_pub)
def init_data_pub(self):
if not self.data_pub_class:
self.data_pub = None
return
self.data_pub = self.data_pub_class(parent=self)
self.configurables.append(self.data_pub)
def init_displayhook(self):
# Initialize displayhook, set in/out prompts and printing system
self.displayhook = self.displayhook_class(
parent=self,
shell=self,
cache_size=self.cache_size,
)
self.configurables.append(self.displayhook)
# This is a context manager that installs/removes the displayhook at
# the appropriate time.
self.display_trap = DisplayTrap(hook=self.displayhook)
def init_virtualenv(self):
"""Add a virtualenv to sys.path so the user can import modules from it.
This isn't perfect: it doesn't use the Python interpreter with which the
virtualenv was built, and it ignores the --no-site-packages option. A
warning will appear suggesting the user installs IPython in the
virtualenv, but for many cases, it probably works well enough.
Adapted from code snippets online.
http://blog.ufsoft.org/2009/1/29/ipython-and-virtualenv
"""
if 'VIRTUAL_ENV' not in os.environ:
# Not in a virtualenv
return
# venv detection:
# stdlib venv may symlink sys.executable, so we can't use realpath.
# but others can symlink *to* the venv Python, so we can't just use sys.executable.
# So we just check every item in the symlink tree (generally <= 3)
p = os.path.normcase(sys.executable)
paths = [p]
while os.path.islink(p):
p = os.path.normcase(os.path.join(os.path.dirname(p), os.readlink(p)))
paths.append(p)
p_venv = os.path.normcase(os.environ['VIRTUAL_ENV'])
if any(p.startswith(p_venv) for p in paths):
# Running properly in the virtualenv, don't need to do anything
return
warn("Attempting to work in a virtualenv. If you encounter problems, please "
"install IPython inside the virtualenv.")
if sys.platform == "win32":
virtual_env = os.path.join(os.environ['VIRTUAL_ENV'], 'Lib', 'site-packages')
else:
virtual_env = os.path.join(os.environ['VIRTUAL_ENV'], 'lib',
'python%d.%d' % sys.version_info[:2], 'site-packages')
import site
sys.path.insert(0, virtual_env)
site.addsitedir(virtual_env)
#-------------------------------------------------------------------------
# Things related to injections into the sys module
#-------------------------------------------------------------------------
def save_sys_module_state(self):
"""Save the state of hooks in the sys module.
This has to be called after self.user_module is created.
"""
self._orig_sys_module_state = {'stdin': sys.stdin,
'stdout': sys.stdout,
'stderr': sys.stderr,
'excepthook': sys.excepthook}
self._orig_sys_modules_main_name = self.user_module.__name__
self._orig_sys_modules_main_mod = sys.modules.get(self.user_module.__name__)
def restore_sys_module_state(self):
"""Restore the state of the sys module."""
try:
for k, v in iteritems(self._orig_sys_module_state):
setattr(sys, k, v)
except AttributeError:
pass
# Reset what was done in self.init_sys_modules
if self._orig_sys_modules_main_mod is not None:
sys.modules[self._orig_sys_modules_main_name] = self._orig_sys_modules_main_mod
#-------------------------------------------------------------------------
# Things related to the banner
#-------------------------------------------------------------------------
@property
def banner(self):
banner = self.banner1
if self.profile and self.profile != 'default':
banner += '\nIPython profile: %s\n' % self.profile
if self.banner2:
banner += '\n' + self.banner2
return banner
def show_banner(self, banner=None):
if banner is None:
banner = self.banner
self.write(banner)
#-------------------------------------------------------------------------
# Things related to hooks
#-------------------------------------------------------------------------
def init_hooks(self):
# hooks holds pointers used for user-side customizations
self.hooks = Struct()
self.strdispatchers = {}
# Set all default hooks, defined in the IPython.hooks module.
hooks = IPython.core.hooks
for hook_name in hooks.__all__:
# default hooks have priority 100, i.e. low; user hooks should have
# 0-100 priority
self.set_hook(hook_name,getattr(hooks,hook_name), 100, _warn_deprecated=False)
if self.display_page:
self.set_hook('show_in_pager', page.as_hook(page.display_page), 90)
def set_hook(self,name,hook, priority=50, str_key=None, re_key=None,
_warn_deprecated=True):
"""set_hook(name,hook) -> sets an internal IPython hook.
IPython exposes some of its internal API as user-modifiable hooks. By
adding your function to one of these hooks, you can modify IPython's
behavior to call at runtime your own routines."""
# At some point in the future, this should validate the hook before it
# accepts it. Probably at least check that the hook takes the number
# of args it's supposed to.
f = types.MethodType(hook,self)
# check if the hook is for strdispatcher first
if str_key is not None:
sdp = self.strdispatchers.get(name, StrDispatch())
sdp.add_s(str_key, f, priority )
self.strdispatchers[name] = sdp
return
if re_key is not None:
sdp = self.strdispatchers.get(name, StrDispatch())
sdp.add_re(re.compile(re_key), f, priority )
self.strdispatchers[name] = sdp
return
dp = getattr(self.hooks, name, None)
if name not in IPython.core.hooks.__all__:
print("Warning! Hook '%s' is not one of %s" % \
(name, IPython.core.hooks.__all__ ))
if _warn_deprecated and (name in IPython.core.hooks.deprecated):
alternative = IPython.core.hooks.deprecated[name]
warn("Hook {} is deprecated. Use {} instead.".format(name, alternative))
if not dp:
dp = IPython.core.hooks.CommandChainDispatcher()
try:
dp.add(f,priority)
except AttributeError:
# it was not commandchain, plain old func - replace
dp = f
setattr(self.hooks,name, dp)
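    # Illustrative sketch of a user-defined hook, assuming a hypothetical
    # external editor command ``myeditor``. The hook must accept ``self`` as
    # its first argument because set_hook wraps it as a bound method of the
    # shell ('editor' is one of the hook names in IPython.core.hooks).
    #
    #     def editor_hook(self, filename, linenum=None, wait=True):
    #         import subprocess
    #         cmd = ['myeditor', filename]
    #         if linenum is not None:
    #             cmd.append('+%d' % linenum)
    #         proc = subprocess.Popen(cmd)
    #         if wait:
    #             proc.wait()
    #
    #     get_ipython().set_hook('editor', editor_hook)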
#-------------------------------------------------------------------------
# Things related to events
#-------------------------------------------------------------------------
def init_events(self):
self.events = EventManager(self, available_events)
self.events.register("pre_execute", self._clear_warning_registry)
def register_post_execute(self, func):
"""DEPRECATED: Use ip.events.register('post_run_cell', func)
Register a function for calling after code execution.
"""
warn("ip.register_post_execute is deprecated, use "
"ip.events.register('post_run_cell', func) instead.")
self.events.register('post_run_cell', func)
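    # A minimal sketch of the replacement API mentioned above; the callback
    # name is hypothetical. In this version, 'post_run_cell' callbacks take
    # no arguments.
    #
    #     def announce_cell_done():
    #         print("a cell just finished running")
    #
    #     get_ipython().events.register('post_run_cell', announce_cell_done)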
def _clear_warning_registry(self):
# clear the warning registry, so that different code blocks with
# overlapping line number ranges don't cause spurious suppression of
# warnings (see gh-6611 for details)
if "__warningregistry__" in self.user_global_ns:
del self.user_global_ns["__warningregistry__"]
#-------------------------------------------------------------------------
# Things related to the "main" module
#-------------------------------------------------------------------------
def new_main_mod(self, filename, modname):
"""Return a new 'main' module object for user code execution.
``filename`` should be the path of the script which will be run in the
module. Requests with the same filename will get the same module, with
its namespace cleared.
``modname`` should be the module name - normally either '__main__' or
the basename of the file without the extension.
When scripts are executed via %run, we must keep a reference to their
__main__ module around so that Python doesn't
clear it, rendering references to module globals useless.
This method keeps said reference in a private dict, keyed by the
absolute path of the script. This way, for multiple executions of the
same script we only keep one copy of the namespace (the last one),
thus preventing memory leaks from old references while allowing the
objects from the last execution to be accessible.
"""
filename = os.path.abspath(filename)
try:
main_mod = self._main_mod_cache[filename]
except KeyError:
main_mod = self._main_mod_cache[filename] = types.ModuleType(
py3compat.cast_bytes_py2(modname),
doc="Module created for script run in IPython")
else:
main_mod.__dict__.clear()
main_mod.__name__ = modname
main_mod.__file__ = filename
# It seems pydoc (and perhaps others) needs any module instance to
# implement a __nonzero__ method
main_mod.__nonzero__ = lambda : True
return main_mod
def clear_main_mod_cache(self):
"""Clear the cache of main modules.
Mainly for use by utilities like %reset.
Examples
--------
In [15]: import IPython
In [16]: m = _ip.new_main_mod(IPython.__file__, 'IPython')
In [17]: len(_ip._main_mod_cache) > 0
Out[17]: True
In [18]: _ip.clear_main_mod_cache()
In [19]: len(_ip._main_mod_cache) == 0
Out[19]: True
"""
self._main_mod_cache.clear()
#-------------------------------------------------------------------------
# Things related to debugging
#-------------------------------------------------------------------------
def init_pdb(self):
# Set calling of pdb on exceptions
# self.call_pdb is a property
self.call_pdb = self.pdb
def _get_call_pdb(self):
return self._call_pdb
def _set_call_pdb(self,val):
if val not in (0,1,False,True):
raise ValueError('new call_pdb value must be boolean')
# store value in instance
self._call_pdb = val
# notify the actual exception handlers
self.InteractiveTB.call_pdb = val
call_pdb = property(_get_call_pdb,_set_call_pdb,None,
'Control auto-activation of pdb at exceptions')
def debugger(self,force=False):
"""Call the pydb/pdb debugger.
Keywords:
- force(False): by default, this routine checks the instance call_pdb
flag and does not actually invoke the debugger if the flag is false.
The 'force' option forces the debugger to activate even if the flag
is false.
"""
if not (force or self.call_pdb):
return
if not hasattr(sys,'last_traceback'):
error('No traceback has been produced, nothing to debug.')
return
# use pydb if available
if debugger.has_pydb:
from pydb import pm
else:
# fallback to our internal debugger
pm = lambda : self.InteractiveTB.debugger(force=True)
with self.readline_no_record:
pm()
#-------------------------------------------------------------------------
# Things related to IPython's various namespaces
#-------------------------------------------------------------------------
default_user_namespaces = True
def init_create_namespaces(self, user_module=None, user_ns=None):
# Create the namespace where the user will operate. user_ns is
# normally the only one used, and it is passed to the exec calls as
# the locals argument. But we do carry a user_global_ns namespace
# given as the exec 'globals' argument, This is useful in embedding
# situations where the ipython shell opens in a context where the
# distinction between locals and globals is meaningful. For
# non-embedded contexts, it is just the same object as the user_ns dict.
# FIXME. For some strange reason, __builtins__ is showing up at user
# level as a dict instead of a module. This is a manual fix, but I
# should really track down where the problem is coming from. Alex
# Schmolck reported this problem first.
# A useful post by Alex Martelli on this topic:
# Re: inconsistent value from __builtins__
        # From: Alex Martelli <[email protected]>
        # Date: Friday, 01 October 2004, 04:45:34 PM
        # Newsgroups: comp.lang.python
# Michael Hohn <[email protected]> wrote:
# > >>> print type(builtin_check.get_global_binding('__builtins__'))
# > <type 'dict'>
# > >>> print type(__builtins__)
# > <type 'module'>
# > Is this difference in return value intentional?
# Well, it's documented that '__builtins__' can be either a dictionary
# or a module, and it's been that way for a long time. Whether it's
# intentional (or sensible), I don't know. In any case, the idea is
# that if you need to access the built-in namespace directly, you
# should start with "import __builtin__" (note, no 's') which will
# definitely give you a module. Yeah, it's somewhat confusing:-(.
# These routines return a properly built module and dict as needed by
# the rest of the code, and can also be used by extension writers to
# generate properly initialized namespaces.
if (user_ns is not None) or (user_module is not None):
self.default_user_namespaces = False
self.user_module, self.user_ns = self.prepare_user_module(user_module, user_ns)
# A record of hidden variables we have added to the user namespace, so
# we can list later only variables defined in actual interactive use.
self.user_ns_hidden = {}
# Now that FakeModule produces a real module, we've run into a nasty
# problem: after script execution (via %run), the module where the user
# code ran is deleted. Now that this object is a true module (needed
# so doctest and other tools work correctly), the Python module
# teardown mechanism runs over it, and sets to None every variable
# present in that module. Top-level references to objects from the
# script survive, because the user_ns is updated with them. However,
# calling functions defined in the script that use other things from
# the script will fail, because the function's closure had references
# to the original objects, which are now all None. So we must protect
# these modules from deletion by keeping a cache.
#
# To avoid keeping stale modules around (we only need the one from the
# last run), we use a dict keyed with the full path to the script, so
# only the last version of the module is held in the cache. Note,
# however, that we must cache the module *namespace contents* (their
# __dict__). Because if we try to cache the actual modules, old ones
# (uncached) could be destroyed while still holding references (such as
        # those held by GUI objects that tend to be long-lived).
#
# The %reset command will flush this cache. See the cache_main_mod()
# and clear_main_mod_cache() methods for details on use.
# This is the cache used for 'main' namespaces
self._main_mod_cache = {}
# A table holding all the namespaces IPython deals with, so that
# introspection facilities can search easily.
self.ns_table = {'user_global':self.user_module.__dict__,
'user_local':self.user_ns,
'builtin':builtin_mod.__dict__
}
@property
def user_global_ns(self):
return self.user_module.__dict__
def prepare_user_module(self, user_module=None, user_ns=None):
"""Prepare the module and namespace in which user code will be run.
When IPython is started normally, both parameters are None: a new module
is created automatically, and its __dict__ used as the namespace.
If only user_module is provided, its __dict__ is used as the namespace.
If only user_ns is provided, a dummy module is created, and user_ns
becomes the global namespace. If both are provided (as they may be
when embedding), user_ns is the local namespace, and user_module
provides the global namespace.
Parameters
----------
user_module : module, optional
The current user module in which IPython is being run. If None,
a clean module will be created.
user_ns : dict, optional
A namespace in which to run interactive commands.
Returns
-------
A tuple of user_module and user_ns, each properly initialised.
"""
if user_module is None and user_ns is not None:
user_ns.setdefault("__name__", "__main__")
user_module = DummyMod()
user_module.__dict__ = user_ns
if user_module is None:
user_module = types.ModuleType("__main__",
doc="Automatically created module for IPython interactive environment")
# We must ensure that __builtin__ (without the final 's') is always
# available and pointing to the __builtin__ *module*. For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
user_module.__dict__.setdefault('__builtin__', builtin_mod)
user_module.__dict__.setdefault('__builtins__', builtin_mod)
if user_ns is None:
user_ns = user_module.__dict__
return user_module, user_ns
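    # Illustrative sketch of the user_ns-only case described above: the dict
    # we pass in becomes both the module __dict__ and the namespace, with
    # '__name__', '__builtin__' and '__builtins__' added.
    #
    #     shell = get_ipython()
    #     mod, ns = shell.prepare_user_module(user_ns={'answer': 42})
    #     # ns is the dict we passed in, now also serving as mod.__dict__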
def init_sys_modules(self):
# We need to insert into sys.modules something that looks like a
# module but which accesses the IPython namespace, for shelve and
# pickle to work interactively. Normally they rely on getting
# everything out of __main__, but for embedding purposes each IPython
# instance has its own private namespace, so we can't go shoving
# everything into __main__.
# note, however, that we should only do this for non-embedded
# ipythons, which really mimic the __main__.__dict__ with their own
# namespace. Embedded instances, on the other hand, should not do
# this because they need to manage the user local/global namespaces
# only, but they live within a 'normal' __main__ (meaning, they
# shouldn't overtake the execution environment of the script they're
# embedded in).
# This is overridden in the InteractiveShellEmbed subclass to a no-op.
main_name = self.user_module.__name__
sys.modules[main_name] = self.user_module
def init_user_ns(self):
"""Initialize all user-visible namespaces to their minimum defaults.
Certain history lists are also initialized here, as they effectively
act as user namespaces.
Notes
-----
All data structures here are only filled in, they are NOT reset by this
method. If they were not empty before, data will simply be added to
        them.
"""
# This function works in two parts: first we put a few things in
# user_ns, and we sync that contents into user_ns_hidden so that these
# initial variables aren't shown by %who. After the sync, we add the
# rest of what we *do* want the user to see with %who even on a new
# session (probably nothing, so they really only see their own stuff)
# The user dict must *always* have a __builtin__ reference to the
# Python standard __builtin__ namespace, which must be imported.
# This is so that certain operations in prompt evaluation can be
# reliably executed with builtins. Note that we can NOT use
# __builtins__ (note the 's'), because that can either be a dict or a
# module, and can even mutate at runtime, depending on the context
# (Python makes no guarantees on it). In contrast, __builtin__ is
# always a module object, though it must be explicitly imported.
# For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
ns = dict()
# make global variables for user access to the histories
ns['_ih'] = self.history_manager.input_hist_parsed
ns['_oh'] = self.history_manager.output_hist
ns['_dh'] = self.history_manager.dir_hist
ns['_sh'] = shadowns
# user aliases to input and output histories. These shouldn't show up
# in %who, as they can have very large reprs.
ns['In'] = self.history_manager.input_hist_parsed
ns['Out'] = self.history_manager.output_hist
# Store myself as the public api!!!
ns['get_ipython'] = self.get_ipython
ns['exit'] = self.exiter
ns['quit'] = self.exiter
# Sync what we've added so far to user_ns_hidden so these aren't seen
# by %who
self.user_ns_hidden.update(ns)
# Anything put into ns now would show up in %who. Think twice before
# putting anything here, as we really want %who to show the user their
# stuff, not our variables.
# Finally, update the real user's namespace
self.user_ns.update(ns)
@property
def all_ns_refs(self):
"""Get a list of references to all the namespace dictionaries in which
IPython might store a user-created object.
Note that this does not include the displayhook, which also caches
objects from the output."""
return [self.user_ns, self.user_global_ns, self.user_ns_hidden] + \
[m.__dict__ for m in self._main_mod_cache.values()]
def reset(self, new_session=True):
"""Clear all internal namespaces, and attempt to release references to
user objects.
If new_session is True, a new history session will be opened.
"""
# Clear histories
self.history_manager.reset(new_session)
# Reset counter used to index all histories
if new_session:
self.execution_count = 1
# Flush cached output items
if self.displayhook.do_full_cache:
self.displayhook.flush()
# The main execution namespaces must be cleared very carefully,
# skipping the deletion of the builtin-related keys, because doing so
# would cause errors in many object's __del__ methods.
if self.user_ns is not self.user_global_ns:
self.user_ns.clear()
ns = self.user_global_ns
drop_keys = set(ns.keys())
drop_keys.discard('__builtin__')
drop_keys.discard('__builtins__')
drop_keys.discard('__name__')
for k in drop_keys:
del ns[k]
self.user_ns_hidden.clear()
# Restore the user namespaces to minimal usability
self.init_user_ns()
# Restore the default and user aliases
self.alias_manager.clear_aliases()
self.alias_manager.init_aliases()
# Flush the private list of module references kept for script
# execution protection
self.clear_main_mod_cache()
def del_var(self, varname, by_name=False):
"""Delete a variable from the various namespaces, so that, as
far as possible, we're not keeping any hidden references to it.
Parameters
----------
varname : str
The name of the variable to delete.
by_name : bool
If True, delete variables with the given name in each
namespace. If False (default), find the variable in the user
namespace, and delete references to it.
"""
if varname in ('__builtin__', '__builtins__'):
raise ValueError("Refusing to delete %s" % varname)
ns_refs = self.all_ns_refs
if by_name: # Delete by name
for ns in ns_refs:
try:
del ns[varname]
except KeyError:
pass
else: # Delete by object
try:
obj = self.user_ns[varname]
except KeyError:
raise NameError("name '%s' is not defined" % varname)
# Also check in output history
ns_refs.append(self.history_manager.output_hist)
for ns in ns_refs:
to_delete = [n for n, o in iteritems(ns) if o is obj]
for name in to_delete:
del ns[name]
# displayhook keeps extra references, but not in a dictionary
for name in ('_', '__', '___'):
if getattr(self.displayhook, name) is obj:
setattr(self.displayhook, name, None)
def reset_selective(self, regex=None):
"""Clear selective variables from internal namespaces based on a
specified regular expression.
Parameters
----------
regex : string or compiled pattern, optional
A regular expression pattern that will be used in searching
            variable names in the user's namespaces.
"""
if regex is not None:
try:
m = re.compile(regex)
except TypeError:
raise TypeError('regex must be a string or compiled pattern')
# Search for keys in each namespace that match the given regex
# If a match is found, delete the key/value pair.
for ns in self.all_ns_refs:
                # iterate over a snapshot of the keys, since entries are
                # deleted during iteration
                for var in list(ns):
                    if m.search(var):
                        del ns[var]
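    # Illustrative sketch: clearing every user variable that matches a
    # pattern (the 'tmp_' prefix is a hypothetical naming convention).
    #
    #     ip = get_ipython()
    #     ip.push({'tmp_a': 1, 'tmp_b': 2, 'keep_me': 3})
    #     ip.reset_selective(r'^tmp_')
    #     # 'tmp_a' and 'tmp_b' are gone from ip.user_ns; 'keep_me' remains.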
def push(self, variables, interactive=True):
"""Inject a group of variables into the IPython user namespace.
Parameters
----------
variables : dict, str or list/tuple of str
The variables to inject into the user's namespace. If a dict, a
simple update is done. If a str, the string is assumed to have
variable names separated by spaces. A list/tuple of str can also
be used to give the variable names. If just the variable names are
            given (list/tuple/str), then the variable values are looked up in
            the caller's frame.
interactive : bool
If True (default), the variables will be listed with the ``who``
magic.
"""
vdict = None
# We need a dict of name/value pairs to do namespace updates.
if isinstance(variables, dict):
vdict = variables
elif isinstance(variables, string_types+(list, tuple)):
if isinstance(variables, string_types):
vlist = variables.split()
else:
vlist = variables
vdict = {}
cf = sys._getframe(1)
for name in vlist:
try:
vdict[name] = eval(name, cf.f_globals, cf.f_locals)
except:
print('Could not get variable %s from %s' %
(name,cf.f_code.co_name))
else:
raise ValueError('variables must be a dict/str/list/tuple')
# Propagate variables to user namespace
self.user_ns.update(vdict)
# And configure interactive visibility
user_ns_hidden = self.user_ns_hidden
if interactive:
for name in vdict:
user_ns_hidden.pop(name, None)
else:
user_ns_hidden.update(vdict)
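    # Illustrative sketch of the three accepted forms of ``variables``
    # (the names alpha/beta/gamma are hypothetical).
    #
    #     ip = get_ipython()
    #     ip.push({'alpha': 1})                  # dict: plain namespace update
    #     beta, gamma = 2, 3
    #     ip.push('beta gamma')                  # str: names from this frame
    #     ip.push(['beta'], interactive=False)   # list: hidden from %who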
def drop_by_id(self, variables):
"""Remove a dict of variables from the user namespace, if they are the
same as the values in the dictionary.
This is intended for use by extensions: variables that they've added can
be taken back out if they are unloaded, without removing any that the
user has overwritten.
Parameters
----------
variables : dict
A dictionary mapping object names (as strings) to the objects.
"""
for name, obj in iteritems(variables):
if name in self.user_ns and self.user_ns[name] is obj:
del self.user_ns[name]
self.user_ns_hidden.pop(name, None)
#-------------------------------------------------------------------------
# Things related to object introspection
#-------------------------------------------------------------------------
def _ofind(self, oname, namespaces=None):
"""Find an object in the available namespaces.
self._ofind(oname) -> dict with keys: found,obj,ospace,ismagic
Has special code to detect magic functions.
"""
oname = oname.strip()
#print '1- oname: <%r>' % oname # dbg
if not oname.startswith(ESC_MAGIC) and \
not oname.startswith(ESC_MAGIC2) and \
not py3compat.isidentifier(oname, dotted=True):
return dict(found=False)
if namespaces is None:
# Namespaces to search in:
# Put them in a list. The order is important so that we
# find things in the same order that Python finds them.
namespaces = [ ('Interactive', self.user_ns),
('Interactive (global)', self.user_global_ns),
('Python builtin', builtin_mod.__dict__),
]
# initialize results to 'null'
found = False; obj = None; ospace = None;
ismagic = False; isalias = False; parent = None
# We need to special-case 'print', which as of python2.6 registers as a
# function but should only be treated as one if print_function was
# loaded with a future import. In this case, just bail.
if (oname == 'print' and not py3compat.PY3 and not \
(self.compile.compiler_flags & __future__.CO_FUTURE_PRINT_FUNCTION)):
return {'found':found, 'obj':obj, 'namespace':ospace,
'ismagic':ismagic, 'isalias':isalias, 'parent':parent}
# Look for the given name by splitting it in parts. If the head is
# found, then we look for all the remaining parts as members, and only
# declare success if we can find them all.
oname_parts = oname.split('.')
oname_head, oname_rest = oname_parts[0],oname_parts[1:]
for nsname,ns in namespaces:
try:
obj = ns[oname_head]
except KeyError:
continue
else:
#print 'oname_rest:', oname_rest # dbg
for idx, part in enumerate(oname_rest):
try:
parent = obj
# The last part is looked up in a special way to avoid
# descriptor invocation as it may raise or have side
# effects.
if idx == len(oname_rest) - 1:
obj = self._getattr_property(obj, part)
else:
obj = getattr(obj, part)
except:
# Blanket except b/c some badly implemented objects
# allow __getattr__ to raise exceptions other than
# AttributeError, which then crashes IPython.
break
else:
# If we finish the for loop (no break), we got all members
found = True
ospace = nsname
break # namespace loop
# Try to see if it's magic
if not found:
obj = None
if oname.startswith(ESC_MAGIC2):
oname = oname.lstrip(ESC_MAGIC2)
obj = self.find_cell_magic(oname)
elif oname.startswith(ESC_MAGIC):
oname = oname.lstrip(ESC_MAGIC)
obj = self.find_line_magic(oname)
else:
# search without prefix, so run? will find %run?
obj = self.find_line_magic(oname)
if obj is None:
obj = self.find_cell_magic(oname)
if obj is not None:
found = True
ospace = 'IPython internal'
ismagic = True
isalias = isinstance(obj, Alias)
# Last try: special-case some literals like '', [], {}, etc:
if not found and oname_head in ["''",'""','[]','{}','()']:
obj = eval(oname_head)
found = True
ospace = 'Interactive'
return {'found':found, 'obj':obj, 'namespace':ospace,
'ismagic':ismagic, 'isalias':isalias, 'parent':parent}
@staticmethod
def _getattr_property(obj, attrname):
"""Property-aware getattr to use in object finding.
If attrname represents a property, return it unevaluated (in case it has
        side effects or raises an error).
"""
if not isinstance(obj, type):
try:
# `getattr(type(obj), attrname)` is not guaranteed to return
# `obj`, but does so for property:
#
# property.__get__(self, None, cls) -> self
#
# The universal alternative is to traverse the mro manually
# searching for attrname in class dicts.
attr = getattr(type(obj), attrname)
except AttributeError:
pass
else:
# This relies on the fact that data descriptors (with both
# __get__ & __set__ magic methods) take precedence over
# instance-level attributes:
#
# class A(object):
# @property
# def foobar(self): return 123
# a = A()
# a.__dict__['foobar'] = 345
# a.foobar # == 123
#
# So, a property may be returned right away.
if isinstance(attr, property):
return attr
# Nothing helped, fall back.
return getattr(obj, attrname)
def _object_find(self, oname, namespaces=None):
"""Find an object and return a struct with info about it."""
return Struct(self._ofind(oname, namespaces))
def _inspect(self, meth, oname, namespaces=None, **kw):
"""Generic interface to the inspector system.
This function is meant to be called by pdef, pdoc & friends."""
info = self._object_find(oname, namespaces)
if info.found:
pmethod = getattr(self.inspector, meth)
formatter = format_screen if info.ismagic else None
if meth == 'pdoc':
pmethod(info.obj, oname, formatter)
elif meth == 'pinfo':
pmethod(info.obj, oname, formatter, info, **kw)
else:
pmethod(info.obj, oname)
else:
print('Object `%s` not found.' % oname)
return 'not found' # so callers can take other action
def object_inspect(self, oname, detail_level=0):
"""Get object info about oname"""
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
return self.inspector.info(info.obj, oname, info=info,
detail_level=detail_level
)
else:
return oinspect.object_info(name=oname, found=False)
def object_inspect_text(self, oname, detail_level=0):
"""Get object info as formatted text"""
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
return self.inspector._format_info(info.obj, oname, info=info,
detail_level=detail_level
)
else:
raise KeyError(oname)
#-------------------------------------------------------------------------
# Things related to history management
#-------------------------------------------------------------------------
def init_history(self):
"""Sets up the command history, and starts regular autosaves."""
self.history_manager = HistoryManager(shell=self, parent=self)
self.configurables.append(self.history_manager)
#-------------------------------------------------------------------------
# Things related to exception handling and tracebacks (not debugging)
#-------------------------------------------------------------------------
def init_traceback_handlers(self, custom_exceptions):
# Syntax error handler.
self.SyntaxTB = ultratb.SyntaxTB(color_scheme='NoColor')
# The interactive one is initialized with an offset, meaning we always
# want to remove the topmost item in the traceback, which is our own
# internal code. Valid modes: ['Plain','Context','Verbose']
self.InteractiveTB = ultratb.AutoFormattedTB(mode = 'Plain',
color_scheme='NoColor',
tb_offset = 1,
check_cache=check_linecache_ipython)
# The instance will store a pointer to the system-wide exception hook,
# so that runtime code (such as magics) can access it. This is because
# during the read-eval loop, it may get temporarily overwritten.
self.sys_excepthook = sys.excepthook
# and add any custom exception handlers the user may have specified
self.set_custom_exc(*custom_exceptions)
# Set the exception mode
self.InteractiveTB.set_mode(mode=self.xmode)
def set_custom_exc(self, exc_tuple, handler):
"""set_custom_exc(exc_tuple,handler)
Set a custom exception handler, which will be called if any of the
exceptions in exc_tuple occur in the mainloop (specifically, in the
run_code() method).
Parameters
----------
exc_tuple : tuple of exception classes
A *tuple* of exception classes, for which to call the defined
handler. It is very important that you use a tuple, and NOT A
LIST here, because of the way Python's except statement works. If
you only want to trap a single exception, use a singleton tuple::
exc_tuple == (MyCustomException,)
handler : callable
handler must have the following signature::
def my_handler(self, etype, value, tb, tb_offset=None):
...
return structured_traceback
Your handler must return a structured traceback (a list of strings),
or None.
This will be made into an instance method (via types.MethodType)
of IPython itself, and it will be called if any of the exceptions
listed in the exc_tuple are caught. If the handler is None, an
internal basic one is used, which just prints basic info.
To protect IPython from crashes, if your handler ever raises an
exception or returns an invalid result, it will be immediately
disabled.
WARNING: by putting in your own exception handler into IPython's main
execution loop, you run a very good chance of nasty crashes. This
facility should only be used if you really know what you are doing."""
assert type(exc_tuple)==type(()) , \
"The custom exceptions must be given AS A TUPLE."
def dummy_handler(self,etype,value,tb,tb_offset=None):
print('*** Simple custom exception handler ***')
print('Exception type :',etype)
print('Exception value:',value)
print('Traceback :',tb)
#print 'Source code :','\n'.join(self.buffer)
def validate_stb(stb):
"""validate structured traceback return type
return type of CustomTB *should* be a list of strings, but allow
single strings or None, which are harmless.
This function will *always* return a list of strings,
and will raise a TypeError if stb is inappropriate.
"""
msg = "CustomTB must return list of strings, not %r" % stb
if stb is None:
return []
elif isinstance(stb, string_types):
return [stb]
elif not isinstance(stb, list):
raise TypeError(msg)
# it's a list
for line in stb:
# check every element
if not isinstance(line, string_types):
raise TypeError(msg)
return stb
if handler is None:
wrapped = dummy_handler
else:
def wrapped(self,etype,value,tb,tb_offset=None):
"""wrap CustomTB handler, to protect IPython from user code
This makes it harder (but not impossible) for custom exception
handlers to crash IPython.
"""
try:
stb = handler(self,etype,value,tb,tb_offset=tb_offset)
return validate_stb(stb)
except:
# clear custom handler immediately
self.set_custom_exc((), None)
print("Custom TB Handler failed, unregistering", file=io.stderr)
# show the exception in handler first
stb = self.InteractiveTB.structured_traceback(*sys.exc_info())
print(self.InteractiveTB.stb2text(stb), file=io.stdout)
print("The original exception:", file=io.stdout)
stb = self.InteractiveTB.structured_traceback(
(etype,value,tb), tb_offset=tb_offset
)
return stb
self.CustomTB = types.MethodType(wrapped,self)
self.custom_exceptions = exc_tuple
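    # A minimal sketch of a custom handler with the signature documented
    # above; MyAppError and my_handler are hypothetical names.
    #
    #     class MyAppError(Exception):
    #         pass
    #
    #     def my_handler(self, etype, value, tb, tb_offset=None):
    #         stb = self.InteractiveTB.structured_traceback(
    #             etype, value, tb, tb_offset=tb_offset)
    #         return stb + ['Hint: see the MyApp troubleshooting guide.']
    #
    #     get_ipython().set_custom_exc((MyAppError,), my_handler)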
def excepthook(self, etype, value, tb):
"""One more defense for GUI apps that call sys.excepthook.
GUI frameworks like wxPython trap exceptions and call
sys.excepthook themselves. I guess this is a feature that
enables them to keep running after exceptions that would
otherwise kill their mainloop. This is a bother for IPython
        which expects to catch all of the program exceptions with a try:
except: statement.
Normally, IPython sets sys.excepthook to a CrashHandler instance, so if
any app directly invokes sys.excepthook, it will look to the user like
IPython crashed. In order to work around this, we can disable the
CrashHandler and replace it with this excepthook instead, which prints a
regular traceback using our InteractiveTB. In this fashion, apps which
call sys.excepthook will generate a regular-looking exception from
IPython, and the CrashHandler will only be triggered by real IPython
crashes.
This hook should be used sparingly, only in places which are not likely
to be true IPython errors.
"""
self.showtraceback((etype, value, tb), tb_offset=0)
def _get_exc_info(self, exc_tuple=None):
"""get exc_info from a given tuple, sys.exc_info() or sys.last_type etc.
Ensures sys.last_type,value,traceback hold the exc_info we found,
from whichever source.
raises ValueError if none of these contain any information
"""
if exc_tuple is None:
etype, value, tb = sys.exc_info()
else:
etype, value, tb = exc_tuple
if etype is None:
if hasattr(sys, 'last_type'):
etype, value, tb = sys.last_type, sys.last_value, \
sys.last_traceback
if etype is None:
raise ValueError("No exception to find")
# Now store the exception info in sys.last_type etc.
# WARNING: these variables are somewhat deprecated and not
# necessarily safe to use in a threaded environment, but tools
# like pdb depend on their existence, so let's set them. If we
# find problems in the field, we'll need to revisit their use.
sys.last_type = etype
sys.last_value = value
sys.last_traceback = tb
return etype, value, tb
def show_usage_error(self, exc):
"""Show a short message for UsageErrors
These are special exceptions that shouldn't show a traceback.
"""
self.write_err("UsageError: %s" % exc)
def get_exception_only(self, exc_tuple=None):
"""
Return as a string (ending with a newline) the exception that
just occurred, without any traceback.
"""
etype, value, tb = self._get_exc_info(exc_tuple)
msg = traceback.format_exception_only(etype, value)
return ''.join(msg)
def showtraceback(self, exc_tuple=None, filename=None, tb_offset=None,
exception_only=False):
"""Display the exception that just occurred.
If nothing is known about the exception, this is the method which
should be used throughout the code for presenting user tracebacks,
rather than directly invoking the InteractiveTB object.
A specific showsyntaxerror() also exists, but this method can take
care of calling it if needed, so unless you are explicitly catching a
SyntaxError exception, don't try to analyze the stack manually and
simply call this method."""
try:
try:
etype, value, tb = self._get_exc_info(exc_tuple)
except ValueError:
self.write_err('No traceback available to show.\n')
return
if issubclass(etype, SyntaxError):
# Though this won't be called by syntax errors in the input
# line, there may be SyntaxError cases with imported code.
self.showsyntaxerror(filename)
elif etype is UsageError:
self.show_usage_error(value)
else:
if exception_only:
stb = ['An exception has occurred, use %tb to see '
'the full traceback.\n']
stb.extend(self.InteractiveTB.get_exception_only(etype,
value))
else:
try:
# Exception classes can customise their traceback - we
# use this in IPython.parallel for exceptions occurring
# in the engines. This should return a list of strings.
stb = value._render_traceback_()
except Exception:
stb = self.InteractiveTB.structured_traceback(etype,
value, tb, tb_offset=tb_offset)
self._showtraceback(etype, value, stb)
if self.call_pdb:
# drop into debugger
self.debugger(force=True)
return
# Actually show the traceback
self._showtraceback(etype, value, stb)
except KeyboardInterrupt:
self.write_err('\n' + self.get_exception_only())
def _showtraceback(self, etype, evalue, stb):
"""Actually show a traceback.
Subclasses may override this method to put the traceback on a different
place, like a side channel.
"""
print(self.InteractiveTB.stb2text(stb), file=io.stdout)
def showsyntaxerror(self, filename=None):
"""Display the syntax error that just occurred.
This doesn't display a stack trace because there isn't one.
If a filename is given, it is stuffed in the exception instead
of what was there before (because Python's parser always uses
"<string>" when reading from a string).
"""
etype, value, last_traceback = self._get_exc_info()
if filename and issubclass(etype, SyntaxError):
try:
value.filename = filename
except:
# Not the format we expect; leave it alone
pass
stb = self.SyntaxTB.structured_traceback(etype, value, [])
self._showtraceback(etype, value, stb)
# This is overridden in TerminalInteractiveShell to show a message about
# the %paste magic.
def showindentationerror(self):
"""Called by run_cell when there's an IndentationError in code entered
at the prompt.
This is overridden in TerminalInteractiveShell to show a message about
the %paste magic."""
self.showsyntaxerror()
#-------------------------------------------------------------------------
# Things related to readline
#-------------------------------------------------------------------------
def init_readline(self):
"""Moved to terminal subclass, here only to simplify the init logic."""
self.readline = None
# Set a number of methods that depend on readline to be no-op
self.readline_no_record = NoOpContext()
self.set_readline_completer = no_op
self.set_custom_completer = no_op
@skip_doctest
def set_next_input(self, s, replace=False):
""" Sets the 'default' input string for the next command line.
Example::
            In [1]: _ip.set_next_input("Hello World")
            In [2]: Hello World_ # cursor is here
"""
self.rl_next_input = py3compat.cast_bytes_py2(s)
def _indent_current_str(self):
"""return the current level of indentation as a string"""
return self.input_splitter.indent_spaces * ' '
#-------------------------------------------------------------------------
# Things related to text completion
#-------------------------------------------------------------------------
def init_completer(self):
"""Initialize the completion machinery.
This creates completion machinery that can be used by client code,
either interactively in-process (typically triggered by the readline
library), programmatically (such as in test suites) or out-of-process
(typically over the network by remote frontends).
"""
from IPython.core.completer import IPCompleter
from IPython.core.completerlib import (module_completer,
magic_run_completer, cd_completer, reset_completer)
self.Completer = IPCompleter(shell=self,
namespace=self.user_ns,
global_namespace=self.user_global_ns,
use_readline=self.has_readline,
parent=self,
)
self.configurables.append(self.Completer)
# Add custom completers to the basic ones built into IPCompleter
sdisp = self.strdispatchers.get('complete_command', StrDispatch())
self.strdispatchers['complete_command'] = sdisp
self.Completer.custom_completers = sdisp
self.set_hook('complete_command', module_completer, str_key = 'import')
self.set_hook('complete_command', module_completer, str_key = 'from')
self.set_hook('complete_command', module_completer, str_key = '%aimport')
self.set_hook('complete_command', magic_run_completer, str_key = '%run')
self.set_hook('complete_command', cd_completer, str_key = '%cd')
self.set_hook('complete_command', reset_completer, str_key = '%reset')
def complete(self, text, line=None, cursor_pos=None):
"""Return the completed text and a list of completions.
Parameters
----------
text : string
A string of text to be completed on. It can be given as empty and
instead a line/position pair are given. In this case, the
completer itself will split the line like readline does.
line : string, optional
The complete line that text is part of.
cursor_pos : int, optional
The position of the cursor on the input line.
Returns
-------
text : string
The actual text that was completed.
matches : list
A sorted list with all possible completions.
The optional arguments allow the completion to take more context into
account, and are part of the low-level completion API.
This is a wrapper around the completion mechanism, similar to what
readline does at the command line when the TAB key is hit. By
exposing it as a method, it can be used by other non-readline
environments (such as GUIs) for text completion.
Simple usage example:
In [1]: x = 'hello'
In [2]: _ip.complete('x.l')
Out[2]: ('x.l', ['x.ljust', 'x.lower', 'x.lstrip'])
"""
# Inject names into __builtin__ so we can complete on the added names.
with self.builtin_trap:
return self.Completer.complete(text, line, cursor_pos)
def set_custom_completer(self, completer, pos=0):
"""Adds a new custom completer function.
The position argument (defaults to 0) is the index in the completers
list where you want the completer to be inserted."""
newcomp = types.MethodType(completer,self.Completer)
self.Completer.matchers.insert(pos,newcomp)
def set_completer_frame(self, frame=None):
"""Set the frame of the completer."""
if frame:
self.Completer.namespace = frame.f_locals
self.Completer.global_namespace = frame.f_globals
else:
self.Completer.namespace = self.user_ns
self.Completer.global_namespace = self.user_global_ns
#-------------------------------------------------------------------------
# Things related to magics
#-------------------------------------------------------------------------
def init_magics(self):
from IPython.core import magics as m
self.magics_manager = magic.MagicsManager(shell=self,
parent=self,
user_magics=m.UserMagics(self))
self.configurables.append(self.magics_manager)
# Expose as public API from the magics manager
self.register_magics = self.magics_manager.register
self.define_magic = self.magics_manager.define_magic
self.register_magics(m.AutoMagics, m.BasicMagics, m.CodeMagics,
m.ConfigMagics, m.DeprecatedMagics, m.DisplayMagics, m.ExecutionMagics,
m.ExtensionMagics, m.HistoryMagics, m.LoggingMagics,
m.NamespaceMagics, m.OSMagics, m.PylabMagics, m.ScriptMagics,
)
# Register Magic Aliases
mman = self.magics_manager
# FIXME: magic aliases should be defined by the Magics classes
# or in MagicsManager, not here
mman.register_alias('ed', 'edit')
mman.register_alias('hist', 'history')
mman.register_alias('rep', 'recall')
mman.register_alias('SVG', 'svg', 'cell')
mman.register_alias('HTML', 'html', 'cell')
mman.register_alias('file', 'writefile', 'cell')
# FIXME: Move the color initialization to the DisplayHook, which
# should be split into a prompt manager and displayhook. We probably
        # even need a centralized colors management object.
self.magic('colors %s' % self.colors)
# Defined here so that it's included in the documentation
@functools.wraps(magic.MagicsManager.register_function)
def register_magic_function(self, func, magic_kind='line', magic_name=None):
self.magics_manager.register_function(func,
magic_kind=magic_kind, magic_name=magic_name)
def run_line_magic(self, magic_name, line):
"""Execute the given line magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the input line as a single string.
"""
fn = self.find_line_magic(magic_name)
if fn is None:
cm = self.find_cell_magic(magic_name)
etpl = "Line magic function `%%%s` not found%s."
extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, '
'did you mean that instead?)' % magic_name )
error(etpl % (magic_name, extra))
else:
# Note: this is the distance in the stack to the user's frame.
# This will need to be updated if the internal calling logic gets
# refactored, or else we'll be expanding the wrong variables.
stack_depth = 2
magic_arg_s = self.var_expand(line, stack_depth)
# Put magic args in a list so we can call with f(*a) syntax
args = [magic_arg_s]
kwargs = {}
# Grab local namespace if we need it:
if getattr(fn, "needs_local_scope", False):
kwargs['local_ns'] = sys._getframe(stack_depth).f_locals
with self.builtin_trap:
result = fn(*args,**kwargs)
return result
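    # Illustrative sketch: calling a line magic programmatically, equivalent
    # to typing ``%timeit sum(range(10))`` at the prompt.
    #
    #     get_ipython().run_line_magic('timeit', 'sum(range(10))')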
def run_cell_magic(self, magic_name, line, cell):
"""Execute the given cell magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the first input line as a single string.
cell : str
The body of the cell as a (possibly multiline) string.
"""
fn = self.find_cell_magic(magic_name)
if fn is None:
lm = self.find_line_magic(magic_name)
etpl = "Cell magic `%%{0}` not found{1}."
extra = '' if lm is None else (' (But line magic `%{0}` exists, '
'did you mean that instead?)'.format(magic_name))
error(etpl.format(magic_name, extra))
elif cell == '':
message = '%%{0} is a cell magic, but the cell body is empty.'.format(magic_name)
if self.find_line_magic(magic_name) is not None:
message += ' Did you mean the line magic %{0} (single %)?'.format(magic_name)
raise UsageError(message)
else:
# Note: this is the distance in the stack to the user's frame.
# This will need to be updated if the internal calling logic gets
# refactored, or else we'll be expanding the wrong variables.
stack_depth = 2
magic_arg_s = self.var_expand(line, stack_depth)
with self.builtin_trap:
result = fn(magic_arg_s, cell)
return result
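    # Illustrative sketch: the cell-magic counterpart, roughly equivalent to
    # a ``%%writefile`` cell (the filename is hypothetical).
    #
    #     get_ipython().run_cell_magic('writefile', 'notes.txt',
    #                                  'first line\nsecond line\n')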
def find_line_magic(self, magic_name):
"""Find and return a line magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['line'].get(magic_name)
def find_cell_magic(self, magic_name):
"""Find and return a cell magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['cell'].get(magic_name)
def find_magic(self, magic_name, magic_kind='line'):
"""Find and return a magic of the given type by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics[magic_kind].get(magic_name)
def magic(self, arg_s):
"""DEPRECATED. Use run_line_magic() instead.
Call a magic function by name.
Input: a string containing the name of the magic function to call and
any additional arguments to be passed to the magic.
magic('name -opt foo bar') is equivalent to typing at the ipython
prompt:
In[1]: %name -opt foo bar
To call a magic without arguments, simply use magic('name').
This provides a proper Python function to call IPython's magics in any
valid Python code you can type at the interpreter, including loops and
compound statements.
"""
# TODO: should we issue a loud deprecation warning here?
magic_name, _, magic_arg_s = arg_s.partition(' ')
magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
return self.run_line_magic(magic_name, magic_arg_s)
#-------------------------------------------------------------------------
# Things related to macros
#-------------------------------------------------------------------------
def define_macro(self, name, themacro):
"""Define a new macro
Parameters
----------
name : str
The name of the macro.
themacro : str or Macro
The action to do upon invoking the macro. If a string, a new
Macro object is created by passing the string to it.
"""
from IPython.core import macro
if isinstance(themacro, string_types):
themacro = macro.Macro(themacro)
if not isinstance(themacro, macro.Macro):
raise ValueError('A macro must be a string or a Macro instance.')
self.user_ns[name] = themacro
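    # Illustrative sketch: defining a macro from a string; the macro name and
    # its body are hypothetical.
    #
    #     get_ipython().define_macro('setup_plot',
    #         'import matplotlib.pyplot as plt\nfig, ax = plt.subplots()\n')
    #     # typing ``setup_plot`` at the prompt now replays those two lines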
#-------------------------------------------------------------------------
# Things related to the running of system commands
#-------------------------------------------------------------------------
def system_piped(self, cmd):
"""Call the given cmd in a subprocess, piping stdout/err
Parameters
----------
cmd : str
          Command to execute (cannot end in '&', as background processes are
          not supported). Should not be a command that expects input
          other than simple text.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
# We do not support backgrounding processes because we either use
# pexpect or pipes to read from. Users can always just call
# os.system() or use ip.system=ip.system_raw
# if they really want a background process.
raise OSError("Background processes not supported.")
# we explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns.
self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))
def system_raw(self, cmd):
"""Call the given cmd in a subprocess using os.system on Windows or
subprocess.call using the system shell on other platforms.
Parameters
----------
cmd : str
Command to execute.
"""
cmd = self.var_expand(cmd, depth=1)
# protect os.system from UNC paths on Windows, which it can't handle:
if sys.platform == 'win32':
from IPython.utils._process_win32 import AvoidUNCPath
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
cmd = py3compat.unicode_to_str(cmd)
try:
ec = os.system(cmd)
except KeyboardInterrupt:
self.write_err('\n' + self.get_exception_only())
ec = -2
else:
cmd = py3compat.unicode_to_str(cmd)
# For posix the result of the subprocess.call() below is an exit
# code, which by convention is zero for success, positive for
# program failure. Exit codes above 128 are reserved for signals,
# and the formula for converting a signal to an exit code is usually
# signal_number+128. To more easily differentiate between exit
# codes and signals, ipython uses negative numbers. For instance
# since control-c is signal 2 but exit code 130, ipython's
# _exit_code variable will read -2. Note that some shells like
# csh and fish don't follow sh/bash conventions for exit codes.
executable = os.environ.get('SHELL', None)
try:
# Use env shell instead of default /bin/sh
ec = subprocess.call(cmd, shell=True, executable=executable)
except KeyboardInterrupt:
# intercept control-C; a long traceback is not useful here
self.write_err('\n' + self.get_exception_only())
ec = 130
if ec > 128:
ec = -(ec - 128)
# We explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns. Note the semantics
        # of _exit_code: for control-c, _exit_code == -signal.SIGINT,
# but raising SystemExit(_exit_code) will give status 254!
self.user_ns['_exit_code'] = ec
# use piped system by default, because it is better behaved
system = system_piped
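    # Worked example of the exit-code convention described in system_raw:
    # a child killed by SIGINT (signal 2) reports status 128 + 2 = 130,
    # which is stored as a negative number.
    #
    #     ip = get_ipython()
    #     ip.system_raw('sleep 10')        # interrupt with Ctrl-C on POSIX
    #     ip.user_ns['_exit_code']         # -> -2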
def getoutput(self, cmd, split=True, depth=0):
"""Get output (possibly including stderr) from a subprocess.
Parameters
----------
cmd : str
          Command to execute (cannot end in '&', as background processes are
          not supported).
split : bool, optional
If True, split the output into an IPython SList. Otherwise, an
IPython LSString is returned. These are objects similar to normal
lists and strings, with a few convenience attributes for easier
manipulation of line-based output. You can use '?' on them for
details.
depth : int, optional
How many frames above the caller are the local variables which should
be expanded in the command string? The default (0) assumes that the
expansion variables are in the stack frame calling this function.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
raise OSError("Background processes not supported.")
out = getoutput(self.var_expand(cmd, depth=depth+1))
if split:
out = SList(out.splitlines())
else:
out = LSString(out)
return out
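    # Illustrative sketch: capturing command output (the shell commands are
    # examples only).
    #
    #     ip = get_ipython()
    #     files = ip.getoutput('ls')                    # SList of lines
    #     py_files = files.grep(r'\.py$')               # SList convenience
    #     text = ip.getoutput('uname -a', split=False)  # single LSString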
#-------------------------------------------------------------------------
# Things related to aliases
#-------------------------------------------------------------------------
def init_alias(self):
self.alias_manager = AliasManager(shell=self, parent=self)
self.configurables.append(self.alias_manager)
#-------------------------------------------------------------------------
# Things related to extensions
#-------------------------------------------------------------------------
def init_extension_manager(self):
self.extension_manager = ExtensionManager(shell=self, parent=self)
self.configurables.append(self.extension_manager)
#-------------------------------------------------------------------------
# Things related to payloads
#-------------------------------------------------------------------------
def init_payload(self):
self.payload_manager = PayloadManager(parent=self)
self.configurables.append(self.payload_manager)
#-------------------------------------------------------------------------
# Things related to the prefilter
#-------------------------------------------------------------------------
def init_prefilter(self):
self.prefilter_manager = PrefilterManager(shell=self, parent=self)
self.configurables.append(self.prefilter_manager)
# Ultimately this will be refactored in the new interpreter code, but
# for now, we should expose the main prefilter method (there's legacy
# code out there that may rely on this).
self.prefilter = self.prefilter_manager.prefilter_lines
def auto_rewrite_input(self, cmd):
"""Print to the screen the rewritten form of the user's command.
This shows visual feedback by rewriting input lines that cause
automatic calling to kick in, like::
/f x
into::
------> f(x)
after the user's input prompt. This helps the user understand that the
input line was transformed automatically by IPython.
"""
if not self.show_rewritten_input:
return
rw = self.prompt_manager.render('rewrite') + cmd
try:
# plain ascii works better w/ pyreadline, on some machines, so
# we use it and only print uncolored rewrite if we have unicode
rw = str(rw)
print(rw, file=io.stdout)
except UnicodeEncodeError:
print("------> " + cmd)
#-------------------------------------------------------------------------
# Things related to extracting values/expressions from kernel and user_ns
#-------------------------------------------------------------------------
def _user_obj_error(self):
"""return simple exception dict
for use in user_expressions
"""
etype, evalue, tb = self._get_exc_info()
stb = self.InteractiveTB.get_exception_only(etype, evalue)
exc_info = {
u'status' : 'error',
u'traceback' : stb,
u'ename' : unicode_type(etype.__name__),
u'evalue' : py3compat.safe_unicode(evalue),
}
return exc_info
def _format_user_obj(self, obj):
"""format a user object to display dict
for use in user_expressions
"""
data, md = self.display_formatter.format(obj)
value = {
'status' : 'ok',
'data' : data,
'metadata' : md,
}
return value
def user_expressions(self, expressions):
"""Evaluate a dict of expressions in the user's namespace.
Parameters
----------
expressions : dict
A dict with string keys and string values. The expression values
should be valid Python expressions, each of which will be evaluated
in the user namespace.
Returns
-------
A dict, keyed like the input expressions dict, with the rich mime-typed
display_data of each value.
"""
out = {}
user_ns = self.user_ns
global_ns = self.user_global_ns
for key, expr in iteritems(expressions):
try:
value = self._format_user_obj(eval(expr, global_ns, user_ns))
except:
value = self._user_obj_error()
out[key] = value
return out
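    # Illustrative sketch of the frontend-facing use of user_expressions;
    # the keys and expressions are hypothetical.
    #
    #     out = get_ipython().user_expressions({'two': '1 + 1',
    #                                           'boom': '1 / 0'})
    #     out['two']['status']     # 'ok', with 'data' and 'metadata' entries
    #     out['boom']['status']    # 'error', with ename/evalue/traceback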
#-------------------------------------------------------------------------
# Things related to the running of code
#-------------------------------------------------------------------------
def ex(self, cmd):
"""Execute a normal python statement in user namespace."""
with self.builtin_trap:
exec(cmd, self.user_global_ns, self.user_ns)
def ev(self, expr):
"""Evaluate python expression expr in user namespace.
Returns the result of evaluation
"""
with self.builtin_trap:
return eval(expr, self.user_global_ns, self.user_ns)
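    # Illustrative sketch: ex/ev run code in the *user* namespace rather
    # than in this module's namespace (the variable name is hypothetical).
    #
    #     ip = get_ipython()
    #     ip.ex('counter = 0')     # statement: executed, returns None
    #     ip.ev('counter + 1')     # expression: evaluated, returns 1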
def safe_execfile(self, fname, *where, **kw):
"""A safe version of the builtin execfile().
This version will never throw an exception, but instead print
helpful error messages to the screen. This only works on pure
Python files with the .py extension.
Parameters
----------
fname : string
The name of the file to be executed.
where : tuple
One or two namespaces, passed to execfile() as (globals,locals).
If only one is given, it is passed as both.
exit_ignore : bool (False)
If True, then silence SystemExit for non-zero status (it is always
silenced for zero status, as it is so common).
raise_exceptions : bool (False)
If True raise exceptions everywhere. Meant for testing.
shell_futures : bool (False)
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
"""
kw.setdefault('exit_ignore', False)
kw.setdefault('raise_exceptions', False)
kw.setdefault('shell_futures', False)
fname = os.path.abspath(os.path.expanduser(fname))
# Make sure we can open the file
try:
with open(fname):
pass
except:
warn('Could not open file <%s> for safe execution.' % fname)
return
# Find things also in current directory. This is needed to mimic the
# behavior of running a script from the system command line, where
# Python inserts the script's directory into sys.path
dname = os.path.dirname(fname)
with prepended_to_syspath(dname):
try:
glob, loc = (where + (None, ))[:2]
py3compat.execfile(
fname, glob, loc,
self.compile if kw['shell_futures'] else None)
except SystemExit as status:
# If the call was made with 0 or None exit status (sys.exit(0)
# or sys.exit() ), don't bother showing a traceback, as both of
# these are considered normal by the OS:
# > python -c'import sys;sys.exit(0)'; echo $?
# 0
# > python -c'import sys;sys.exit()'; echo $?
# 0
# For other exit status, we show the exception unless
# explicitly silenced, but only in short form.
if status.code:
if kw['raise_exceptions']:
raise
if not kw['exit_ignore']:
self.showtraceback(exception_only=True)
except:
if kw['raise_exceptions']:
raise
# tb offset is 2 because we wrap execfile
self.showtraceback(tb_offset=2)
def safe_execfile_ipy(self, fname, shell_futures=False, raise_exceptions=False):
"""Like safe_execfile, but for .ipy or .ipynb files with IPython syntax.
Parameters
----------
fname : str
The name of the file to execute. The filename must have a
.ipy or .ipynb extension.
shell_futures : bool (False)
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
raise_exceptions : bool (False)
If True raise exceptions everywhere. Meant for testing.
"""
fname = os.path.abspath(os.path.expanduser(fname))
# Make sure we can open the file
try:
with open(fname):
pass
except:
warn('Could not open file <%s> for safe execution.' % fname)
return
# Find things also in current directory. This is needed to mimic the
# behavior of running a script from the system command line, where
# Python inserts the script's directory into sys.path
dname = os.path.dirname(fname)
def get_cells():
"""generator for sequence of code blocks to run"""
if fname.endswith('.ipynb'):
from nbformat import read
with io_open(fname) as f:
nb = read(f, as_version=4)
if not nb.cells:
return
for cell in nb.cells:
if cell.cell_type == 'code':
yield cell.source
else:
with open(fname) as f:
yield f.read()
with prepended_to_syspath(dname):
try:
for cell in get_cells():
result = self.run_cell(cell, silent=True, shell_futures=shell_futures)
if raise_exceptions:
result.raise_error()
elif not result.success:
break
except:
if raise_exceptions:
raise
self.showtraceback()
warn('Unknown failure executing file: <%s>' % fname)
def safe_run_module(self, mod_name, where):
"""A safe version of runpy.run_module().
This version will never throw an exception, but instead print
helpful error messages to the screen.
`SystemExit` exceptions with status code 0 or None are ignored.
Parameters
----------
mod_name : string
The name of the module to be executed.
where : dict
The globals namespace.
"""
try:
try:
where.update(
runpy.run_module(str(mod_name), run_name="__main__",
alter_sys=True)
)
except SystemExit as status:
if status.code:
raise
except:
self.showtraceback()
warn('Unknown failure executing module: <%s>' % mod_name)
def run_cell(self, raw_cell, store_history=False, silent=False, shell_futures=True):
"""Run a complete IPython cell.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
silent : bool
If True, avoid side-effects, such as implicit displayhooks and
logging. silent=True forces store_history=False.
shell_futures : bool
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
Returns
-------
result : :class:`ExecutionResult`
"""
result = ExecutionResult()
if (not raw_cell) or raw_cell.isspace():
return result
if silent:
store_history = False
if store_history:
result.execution_count = self.execution_count
def error_before_exec(value):
result.error_before_exec = value
return result
self.events.trigger('pre_execute')
if not silent:
self.events.trigger('pre_run_cell')
# If any of our input transformation (input_transformer_manager or
# prefilter_manager) raises an exception, we store it in this variable
# so that we can display the error after logging the input and storing
# it in the history.
preprocessing_exc_tuple = None
try:
# Static input transformations
cell = self.input_transformer_manager.transform_cell(raw_cell)
except SyntaxError:
preprocessing_exc_tuple = sys.exc_info()
cell = raw_cell # cell has to exist so it can be stored/logged
else:
if len(cell.splitlines()) == 1:
# Dynamic transformations - only applied for single line commands
with self.builtin_trap:
try:
# use prefilter_lines to handle trailing newlines
# restore trailing newline for ast.parse
cell = self.prefilter_manager.prefilter_lines(cell) + '\n'
except Exception:
# don't allow prefilter errors to crash IPython
preprocessing_exc_tuple = sys.exc_info()
# Store raw and processed history
if store_history:
self.history_manager.store_inputs(self.execution_count,
cell, raw_cell)
if not silent:
self.logger.log(cell, raw_cell)
# Display the exception if input processing failed.
if preprocessing_exc_tuple is not None:
self.showtraceback(preprocessing_exc_tuple)
if store_history:
self.execution_count += 1
return error_before_exec(preprocessing_exc_tuple[2])
# Our own compiler remembers the __future__ environment. If we want to
# run code with a separate __future__ environment, use the default
# compiler
compiler = self.compile if shell_futures else CachingCompiler()
with self.builtin_trap:
cell_name = self.compile.cache(cell, self.execution_count)
with self.display_trap:
# Compile to bytecode
try:
code_ast = compiler.ast_parse(cell, filename=cell_name)
except IndentationError as e:
self.showindentationerror()
if store_history:
self.execution_count += 1
return error_before_exec(e)
except (OverflowError, SyntaxError, ValueError, TypeError,
MemoryError) as e:
self.showsyntaxerror()
if store_history:
self.execution_count += 1
return error_before_exec(e)
# Apply AST transformations
try:
code_ast = self.transform_ast(code_ast)
except InputRejected as e:
self.showtraceback()
if store_history:
self.execution_count += 1
return error_before_exec(e)
# Give the displayhook a reference to our ExecutionResult so it
# can fill in the output value.
self.displayhook.exec_result = result
# Execute the user code
interactivity = "none" if silent else self.ast_node_interactivity
self.run_ast_nodes(code_ast.body, cell_name,
interactivity=interactivity, compiler=compiler, result=result)
# Reset this so later displayed values do not modify the
# ExecutionResult
self.displayhook.exec_result = None
self.events.trigger('post_execute')
if not silent:
self.events.trigger('post_run_cell')
if store_history:
# Write output to the database. Does nothing unless
# history output logging is enabled.
self.history_manager.store_output(self.execution_count)
# Each cell is a *single* input, regardless of how many lines it has
self.execution_count += 1
return result
def transform_ast(self, node):
"""Apply the AST transformations from self.ast_transformers
Parameters
----------
node : ast.Node
The root node to be transformed. Typically called with the ast.Module
produced by parsing user input.
Returns
-------
An ast.Node corresponding to the node it was called with. Note that it
may also modify the passed object, so don't rely on references to the
original AST.
"""
for transformer in self.ast_transformers:
try:
node = transformer.visit(node)
except InputRejected:
# User-supplied AST transformers can reject an input by raising
# an InputRejected. Short-circuit in this case so that we
# don't unregister the transform.
raise
except Exception:
warn("AST transformer %r threw an error. It will be unregistered." % transformer)
self.ast_transformers.remove(transformer)
if self.ast_transformers:
ast.fix_missing_locations(node)
return node
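# Hedged illustration (not part of the original module) of plugging into the
# transformer list used above; the class below is invented for the example and
# ``ip`` stands for an InteractiveShell instance:
#
#   import ast
#   class NegateNumericLiterals(ast.NodeTransformer):
#       def visit_Num(self, node):
#           return ast.copy_location(ast.Num(n=-node.n), node)
#   ip.ast_transformers.append(NegateNumericLiterals())
#
# A transformer that raises InputRejected cancels the cell but stays
# registered; any other exception causes it to be removed from the list.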
def run_ast_nodes(self, nodelist, cell_name, interactivity='last_expr',
compiler=compile, result=None):
"""Run a sequence of AST nodes. The execution mode depends on the
interactivity parameter.
Parameters
----------
nodelist : list
A sequence of AST nodes to run.
cell_name : str
Will be passed to the compiler as the filename of the cell. Typically
the value returned by ip.compile.cache(cell).
interactivity : str
'all', 'last', 'last_expr' or 'none', specifying which nodes should be
run interactively (displaying output from expressions). 'last_expr'
will run the last node interactively only if it is an expression (i.e.
expressions in loops or other blocks are not displayed). Other values
for this parameter will raise a ValueError.
compiler : callable
A function with the same interface as the built-in compile(), to turn
the AST nodes into code objects. Default is the built-in compile().
result : ExecutionResult, optional
An object to store exceptions that occur during execution.
Returns
-------
True if an exception occurred while running code, False if it finished
running.
"""
if not nodelist:
return
if interactivity == 'last_expr':
if isinstance(nodelist[-1], ast.Expr):
interactivity = "last"
else:
interactivity = "none"
if interactivity == 'none':
to_run_exec, to_run_interactive = nodelist, []
elif interactivity == 'last':
to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:]
elif interactivity == 'all':
to_run_exec, to_run_interactive = [], nodelist
else:
raise ValueError("Interactivity was %r" % interactivity)
try:
for i, node in enumerate(to_run_exec):
mod = ast.Module([node])
code = compiler(mod, cell_name, "exec")
if self.run_code(code, result):
return True
for i, node in enumerate(to_run_interactive):
mod = ast.Interactive([node])
code = compiler(mod, cell_name, "single")
if self.run_code(code, result):
return True
# Flush softspace
if softspace(sys.stdout, 0):
print()
except:
# It's possible to have exceptions raised here, typically by
# compilation of odd code (such as a naked 'return' outside a
# function) that did parse but isn't valid. Typically the exception
# is a SyntaxError, but it's safest just to catch anything and show
# the user a traceback.
# We do only one try/except outside the loop to minimize the impact
# on runtime, and also because if any node in the node list is
# broken, we should stop execution completely.
if result:
result.error_before_exec = sys.exc_info()[1]
self.showtraceback()
return True
return False
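# Hedged worked example of the interactivity modes above. For a cell parsed
# into nodelist = [Assign, Assign, Expr], e.g. ``a = 1``, ``b = a + 1``, ``b``:
#   'last_expr' -> the last node is an ast.Expr, so it degrades to 'last'
#   'last'      -> exec the two assignments in "exec" mode, run the trailing
#                  expression in "single" mode so its value reaches the displayhook
#   'all'       -> every node compiled in "single" mode (each bare expression
#                  would be displayed)
#   'none'      -> everything run in "exec" mode, nothing displayed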
def run_code(self, code_obj, result=None):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to display a
traceback.
Parameters
----------
code_obj : code object
A compiled code object, to be executed
result : ExecutionResult, optional
An object to store exceptions that occur during execution.
Returns
-------
False : successful execution.
True : an error occurred.
"""
# Set our own excepthook in case the user code tries to call it
# directly, so that the IPython crash handler doesn't get triggered
old_excepthook, sys.excepthook = sys.excepthook, self.excepthook
# we save the original sys.excepthook in the instance, in case config
# code (such as magics) needs access to it.
self.sys_excepthook = old_excepthook
outflag = 1 # happens in more places, so it's easier as default
try:
try:
self.hooks.pre_run_code_hook()
#rprint('Running code', repr(code_obj)) # dbg
exec(code_obj, self.user_global_ns, self.user_ns)
finally:
# Reset our crash handler in place
sys.excepthook = old_excepthook
except SystemExit as e:
if result is not None:
result.error_in_exec = e
self.showtraceback(exception_only=True)
warn("To exit: use 'exit', 'quit', or Ctrl-D.", level=1)
except self.custom_exceptions:
etype, value, tb = sys.exc_info()
if result is not None:
result.error_in_exec = value
self.CustomTB(etype, value, tb)
except:
if result is not None:
result.error_in_exec = sys.exc_info()[1]
self.showtraceback()
else:
outflag = 0
return outflag
# For backwards compatibility
runcode = run_code
#-------------------------------------------------------------------------
# Things related to GUI support and pylab
#-------------------------------------------------------------------------
def enable_gui(self, gui=None):
raise NotImplementedError('Implement enable_gui in a subclass')
def enable_matplotlib(self, gui=None):
"""Enable interactive matplotlib and inline figure support.
This takes the following steps:
1. select the appropriate eventloop and matplotlib backend
2. set up matplotlib for interactive use with that backend
3. configure formatters for inline figure display
4. enable the selected gui eventloop
Parameters
----------
gui : optional, string
If given, dictates the choice of matplotlib GUI backend to use
(should be one of IPython's supported backends, 'qt', 'osx', 'tk',
'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
matplotlib (as dictated by the matplotlib build-time options plus the
user's matplotlibrc configuration file). Note that not all backends
make sense in all contexts, for example a terminal ipython can't
display figures inline.
"""
from IPython.core import pylabtools as pt
gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)
if gui != 'inline':
# If we have our first gui selection, store it
if self.pylab_gui_select is None:
self.pylab_gui_select = gui
# Otherwise if they are different
elif gui != self.pylab_gui_select:
print ('Warning: Cannot change to a different GUI toolkit: %s.'
' Using %s instead.' % (gui, self.pylab_gui_select))
gui, backend = pt.find_gui_and_backend(self.pylab_gui_select)
pt.activate_matplotlib(backend)
pt.configure_inline_support(self, backend)
# Now we must activate the gui pylab wants to use, and fix %run to take
# plot updates into account
self.enable_gui(gui)
self.magics_manager.registry['ExecutionMagics'].default_runner = \
pt.mpl_runner(self.safe_execfile)
return gui, backend
def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
"""Activate pylab support at runtime.
This turns on support for matplotlib, preloads into the interactive
namespace all of numpy and pylab, and configures IPython to correctly
interact with the GUI event loop. The GUI backend to be used can be
selected with the optional ``gui`` argument.
This method only adds preloading the namespace to InteractiveShell.enable_matplotlib.
Parameters
----------
gui : optional, string
If given, dictates the choice of matplotlib GUI backend to use
(should be one of IPython's supported backends, 'qt', 'osx', 'tk',
'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
matplotlib (as dictated by the matplotlib build-time options plus the
user's matplotlibrc configuration file). Note that not all backends
make sense in all contexts, for example a terminal ipython can't
display figures inline.
import_all : optional, bool, default: True
Whether to do `from numpy import *` and `from pylab import *`
in addition to module imports.
welcome_message : deprecated
This argument is ignored, no welcome message will be displayed.
"""
from IPython.core.pylabtools import import_pylab
gui, backend = self.enable_matplotlib(gui)
# We want to prevent the loading of pylab to pollute the user's
# namespace as shown by the %who* magics, so we execute the activation
# code in an empty namespace, and we update *both* user_ns and
# user_ns_hidden with this information.
ns = {}
import_pylab(ns, import_all)
# warn about clobbered names
ignored = {"__builtins__"}
both = set(ns).intersection(self.user_ns).difference(ignored)
clobbered = [ name for name in both if self.user_ns[name] is not ns[name] ]
self.user_ns.update(ns)
self.user_ns_hidden.update(ns)
return gui, backend, clobbered
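# Hedged usage sketch for the two methods above; ``ip`` and the chosen
# backend names are assumptions, not part of this file:
#
#   gui, backend = ip.enable_matplotlib('qt')            # e.g. ('qt', 'Qt4Agg')
#   gui, backend, clobbered = ip.enable_pylab('inline')
#   if clobbered:
#       print("pylab overwrote: %s" % ", ".join(sorted(clobbered)))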
#-------------------------------------------------------------------------
# Utilities
#-------------------------------------------------------------------------
def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
"""Expand python variables in a string.
The depth argument indicates how many frames above the caller should
be walked to look for the local namespace where to expand variables.
The global namespace for expansion is always the user's interactive
namespace.
"""
ns = self.user_ns.copy()
try:
frame = sys._getframe(depth+1)
except ValueError:
# This is thrown if there aren't that many frames on the stack,
# e.g. if a script called run_line_magic() directly.
pass
else:
ns.update(frame.f_locals)
try:
# We have to use .vformat() here, because 'self' is a valid and common
# name, and expanding **ns for .format() would make it collide with
# the 'self' argument of the method.
cmd = formatter.vformat(cmd, args=[], kwargs=ns)
except Exception:
# if formatter couldn't format, just let it go untransformed
pass
return cmd
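# Hedged example of the expansion performed above, assuming a variable named
# ``pattern`` exists in the user namespace (``ip`` is the shell instance):
#
#   ip.user_ns['pattern'] = '*.py'
#   ip.var_expand('ls $pattern')    # -> 'ls *.py'  ({}-style fields also work)
#
# If formatting fails for any reason the string is returned unchanged.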
def mktempfile(self, data=None, prefix='ipython_edit_'):
"""Make a new tempfile and return its filename.
This makes a call to tempfile.mkstemp (created in a tempfile.mkdtemp),
but it registers the created filename internally so ipython cleans it up
at exit time.
Optional inputs:
- data(None): if data is given, it gets written out to the temp file
immediately, and the file is closed again."""
dirname = tempfile.mkdtemp(prefix=prefix)
self.tempdirs.append(dirname)
handle, filename = tempfile.mkstemp('.py', prefix, dir=dirname)
os.close(handle) # On Windows, there can only be one open handle on a file
self.tempfiles.append(filename)
if data:
tmp_file = open(filename,'w')
tmp_file.write(data)
tmp_file.close()
return filename
# TODO: This should be removed when Term is refactored.
def write(self,data):
"""Write a string to the default output"""
io.stdout.write(data)
# TODO: This should be removed when Term is refactored.
def write_err(self,data):
"""Write a string to the default error output"""
io.stderr.write(data)
def ask_yes_no(self, prompt, default=None, interrupt=None):
if self.quiet:
return True
return ask_yes_no(prompt,default,interrupt)
def show_usage(self):
"""Show a usage message"""
page.page(IPython.core.usage.interactive_usage)
def extract_input_lines(self, range_str, raw=False):
"""Return as a string a set of input history slices.
Parameters
----------
range_str : string
The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
since this function is for use by magic functions which get their
arguments as strings. The number before the / is the session
number: ~n goes n back from the current session.
raw : bool, optional
By default, the processed input is used. If this is true, the raw
input history is used instead.
Notes
-----
Slices can be described with two notations:
* ``N:M`` -> standard python form, means including items N...(M-1).
* ``N-M`` -> include items N..M (closed endpoint).
"""
lines = self.history_manager.get_range_by_str(range_str, raw=raw)
return "\n".join(x for _, _, x in lines)
def find_user_code(self, target, raw=True, py_only=False, skip_encoding_cookie=True, search_ns=False):
"""Get a code string from history, file, url, or a string or macro.
This is mainly used by magic functions.
Parameters
----------
target : str
A string specifying code to retrieve. This will be tried respectively
as: ranges of input history (see %history for syntax), url,
corresponding .py file, filename, or an expression evaluating to a
string or Macro in the user namespace.
raw : bool
If true (default), retrieve raw history. Has no effect on the other
retrieval mechanisms.
py_only : bool (default False)
Only try to fetch python code, do not try alternative methods to decode file
if unicode fails.
Returns
-------
A string of code.
ValueError is raised if nothing is found, and TypeError if it evaluates
to an object of another type. In each case, .args[0] is a printable
message.
"""
code = self.extract_input_lines(target, raw=raw) # Grab history
if code:
return code
utarget = unquote_filename(target)
try:
if utarget.startswith(('http://', 'https://')):
return openpy.read_py_url(utarget, skip_encoding_cookie=skip_encoding_cookie)
except UnicodeDecodeError:
if not py_only :
# Deferred import
try:
from urllib.request import urlopen # Py3
except ImportError:
from urllib import urlopen
response = urlopen(target)
return response.read().decode('latin1')
raise ValueError(("'%s' seems to be unreadable.") % utarget)
potential_target = [target]
try :
potential_target.insert(0,get_py_filename(target))
except IOError:
pass
for tgt in potential_target :
if os.path.isfile(tgt): # Read file
try :
return openpy.read_py_file(tgt, skip_encoding_cookie=skip_encoding_cookie)
except UnicodeDecodeError :
if not py_only :
with io_open(tgt,'r', encoding='latin1') as f :
return f.read()
raise ValueError(("'%s' seems to be unreadable.") % target)
elif os.path.isdir(os.path.expanduser(tgt)):
raise ValueError("'%s' is a directory, not a regular file." % target)
if search_ns:
# Inspect namespace to load object source
object_info = self.object_inspect(target, detail_level=1)
if object_info['found'] and object_info['source']:
return object_info['source']
try: # User namespace
codeobj = eval(target, self.user_ns)
except Exception:
raise ValueError(("'%s' was not found in history, as a file, url, "
"nor in the user namespace.") % target)
if isinstance(codeobj, string_types):
return codeobj
elif isinstance(codeobj, Macro):
return codeobj.value
raise TypeError("%s is neither a string nor a macro." % target,
codeobj)
#-------------------------------------------------------------------------
# Things related to IPython exiting
#-------------------------------------------------------------------------
def atexit_operations(self):
"""This will be executed at the time of exit.
Cleanup operations and saving of persistent data that is done
unconditionally by IPython should be performed here.
For things that may depend on startup flags or platform specifics (such
as having readline or not), register a separate atexit function in the
code that has the appropriate information, rather than trying to
clutter this method with platform-specific logic.
"""
# Close the history session (this stores the end time and line count)
# this must be *before* the tempfile cleanup, in case of temporary
# history db
self.history_manager.end_session()
# Cleanup all tempfiles and folders left around
for tfile in self.tempfiles:
try:
os.unlink(tfile)
except OSError:
pass
for tdir in self.tempdirs:
try:
os.rmdir(tdir)
except OSError:
pass
# Clear all user namespaces to release all references cleanly.
self.reset(new_session=False)
# Run user hooks
self.hooks.shutdown_hook()
def cleanup(self):
self.restore_sys_module_state()
class InteractiveShellABC(with_metaclass(abc.ABCMeta, object)):
"""An abstract base class for InteractiveShell."""
InteractiveShellABC.register(InteractiveShell)
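# ---------------------------------------------------------------------------
# Hedged usage sketch, added for illustration only. It assumes the base class
# can be instantiated directly through the SingletonConfigurable helper and is
# guarded by __main__ so that importing this module is unaffected.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    shell = InteractiveShell.instance()
    ok = shell.run_cell("x = 6 * 7", store_history=True)
    print("success=%s execution_count=%s" % (ok.success, ok.execution_count))
    bad = shell.run_cell("1/0", silent=True)  # silent: no displayhook, no history
    print("success=%s error_in_exec=%r" % (bad.success, bad.error_in_exec))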
| apache-2.0 |
perryjohnson/biplaneblade | sandia_blade_lib/prep_stn18_mesh.py | 1 | 24361 | """Write initial TrueGrid files for one Sandia blade station.
Usage
-----
start an IPython (qt)console with the pylab flag:
$ ipython qtconsole --pylab
or
$ ipython --pylab
Then, from the prompt, run this script:
|> %run sandia_blade_lib/prep_stnXX_mesh.py
or
|> import sandia_blade_lib/prep_stnXX_mesh
Author: Perry Roth-Johnson
Last updated: April 10, 2014
"""
import matplotlib.pyplot as plt
import lib.blade as bl
import lib.poly_utils as pu
from shapely.geometry import Polygon
# SET THESE PARAMETERS -----------------
station_num = 18
# --------------------------------------
plt.close('all')
# load the Sandia blade
m = bl.MonoplaneBlade('Sandia blade SNL100-00', 'sandia_blade')
# pre-process the station dimensions
station = m.list_of_stations[station_num-1]
station.airfoil.create_polygon()
station.structure.create_all_layers()
station.structure.save_all_layer_edges()
station.structure.write_all_part_polygons()
# plot the parts
station.plot_parts()
# access the structure for this station
st = station.structure
# upper spar cap -----------------------------------------------------------
label = 'upper spar cap'
# create the bounding polygon
usc = st.spar_cap.layer['upper']
is2 = st.internal_surface_2.layer['resin']
points_usc = [
(-0.75, usc.left[0][1]), # SparCap_upper.txt
is2.polygon.interiors[0].coords[-2], # InternalSurface2_resin.txt
( 0.74, 0.89812764), # InternalSurface2_resin.txt
( 0.75, usc.right[1][1]), # SparCap_upper.txt
( 0.75, 1.3),
(-0.75, 1.3)
]
bounding_polygon = Polygon(points_usc)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_2, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_2, 'triax', label,
bounding_polygon)
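# The same four-step pattern (collect points, build a Polygon, plot it, cut the
# affected layers) repeats for every region below. A hedged sketch of how it
# could be factored out is given here for reference only; the helper name is
# invented and the function is never called by this script:
def cut_region(parts_and_layers, label, points):
    """Cut a list of (part, layer_name) pairs with one bounding polygon."""
    bounding = Polygon(points)
    pu.plot_polygon(bounding, 'None', '#000000')
    for part, layer_name in parts_and_layers:
        pu.cut_plot_and_write_alt_layer(part, layer_name, label, bounding)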
# lower spar cap -----------------------------------------------------------
label = 'lower spar cap'
# create the bounding polygon
lsc = st.spar_cap.layer['lower']
points_lsc = [
(-0.75,-1.6),
( 0.75,-1.6),
(0.75000000, lsc.right[0][1]), # SparCap_lower.txt
(0.74000000, -0.98881226), # InternalSurface2_resin.txt
is2.polygon.interiors[0].coords[-1], # InternalSurface2_resin.txt
(-0.75000000, lsc.left[1][1]) # SparCap_lower.txt
]
bounding_polygon = Polygon(points_lsc)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_2, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_2, 'triax', label,
bounding_polygon)
# TE reinforcement, upper 1 ------------------------------------------------
label = 'TE reinforcement, upper 1'
# create the bounding polygon
ter = st.TE_reinforcement.layer['foam']
points_teu1 = [
(ter.top[0][0], 0.35), # TE_Reinforcement_foam.txt
tuple(ter.top[0]), # TE_Reinforcement_foam.txt
(3.64, 0.16),
(3.80339848, 0.12895125), # InternalSurface4_resin.txt
(3.80339848, 0.35) # InternalSurface4_resin.txt
]
bounding_polygon = Polygon(points_teu1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, lower 1 ------------------------------------------------
label = 'TE reinforcement, lower 1'
# create the bounding polygon
points_tel1 = [
(ter.bottom[0][0], -0.1), # TE_Reinforcement_foam.txt
tuple(ter.bottom[1]), # TE_Reinforcement_foam.txt
(3.65, 0.11),
(3.7, 0.12),
points_teu1[-2], # InternalSurface4_resin.txt
(points_teu1[-1][0], -0.1) # InternalSurface4_resin.txt
]
bounding_polygon = Polygon(points_tel1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, upper 2 ------------------------------------------------
label = 'TE reinforcement, upper 2'
# create the bounding polygon
points_teu2 = [
points_teu1[-1],
points_teu1[-2],
(3.84865423, 0.12645368), # InternalSurface4_triax.txt
(3.84865423, 0.35) # InternalSurface4_triax.txt
]
bounding_polygon = Polygon(points_teu2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, lower 2 ------------------------------------------------
label = 'TE reinforcement, lower 2'
# create the bounding polygon
points_tel2 = [
(points_teu2[0][0], -0.1),
points_teu2[1],
points_teu2[2],
(points_teu2[2][0], -0.1)
]
bounding_polygon = Polygon(points_tel2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, upper 3 ------------------------------------------------
label = 'TE reinforcement, upper 3'
# create the bounding polygon
points_teu3 = [
points_teu2[-1],
points_teu2[-2],
ter.polygon.exterior.coords[0], # TE_Reinforcement_foam.txt
(ter.polygon.exterior.coords[0][0], 0.35) # TE_Reinforcement_foam.txt
]
bounding_polygon = Polygon(points_teu3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, lower 3 ------------------------------------------------
label = 'TE reinforcement, lower 3'
# create the bounding polygon
points_tel3 = [
(points_teu3[0][0], -0.1),
points_teu3[1],
points_teu3[2],
(points_teu3[2][0], -0.1)
]
bounding_polygon = Polygon(points_tel3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, upper 4 ------------------------------------------------
label = 'TE reinforcement, upper 4'
# create the bounding polygon
es = st.external_surface.layer['gelcoat']
points_teu4 = [
points_teu3[-1],
points_teu3[-2],
(4.65942400, 0.003), # TE_Reinforcement_uniax.txt
(4.65942400, 0.01758013), # TE_Reinforcement_uniax.txt
es.polygon.exterior.coords[-2],
(4.65942400, 0.35) # TE_Reinforcement_uniax.txt
]
bounding_polygon = Polygon(points_teu4)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, lower 4 ------------------------------------------------
label = 'TE reinforcement, lower 4'
# create the bounding polygon
points_tel4 = [
(points_teu4[0][0], -0.1),
points_teu4[1],
points_teu4[2],
(4.65942400, -0.01440373), # TE_Reinforcement_uniax.txt
es.polygon.exterior.coords[-1],
(points_teu4[2][0], -0.1)
]
bounding_polygon = Polygon(points_tel4)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# LE panel -----------------------------------------------------------------
label = 'LE panel'
# create the bounding polygon
lep = st.LE_panel.layer['foam']
is1 = st.internal_surface_1.layer['resin']
points_le = [
(-3.00,-1.6),
(-0.836,-1.6),
tuple(lep.bottom[0]), # LE_Panel_foam.txt
is1.polygon.interiors[0].coords[-2], # InternalSurface1_resin.txt
(-1.5, 0.0),
is1.polygon.interiors[0].coords[-1], # InternalSurface1_resin.txt
tuple(lep.top[1]), # LE_Panel_foam.txt
(-0.836, 1.3),
(-3.00, 1.3)
]
bounding_polygon = Polygon(points_le)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# upper aft panel 1 -------------------------------------------------------
label = 'upper aft panel 1'
# create the bounding polygon
ap1u = st.aft_panel_1.layer['upper']
is3 = st.internal_surface_3.layer['resin']
points_ap1u = [
(0.836, 1.3),
(ap1u.right[1][0], 1.3), # AftPanel1_upper.txt
tuple(ap1u.right[1]), # AftPanel1_upper.txt
is3.polygon.interiors[0].coords[-1], # InternalSurface3_resin.txt
(1.2, 0.5),
(0.84600000, 0.95568319), # InternalSurface3_resin.txt
tuple(ap1u.left[0]) # AftPanel1_upper.txt
]
bounding_polygon = Polygon(points_ap1u)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_3, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_3, 'triax', label,
bounding_polygon)
# lower aft panel 1 -------------------------------------------------------
label = 'lower aft panel 1'
# create the bounding polygon
ap1l = st.aft_panel_1.layer['lower']
points_ap1l = [
(0.836, -1.6),
(ap1l.right[0][0], -1.6), # AftPanel1_lower.txt
tuple(ap1l.right[0]), # AftPanel1_lower.txt
(2.35558711, -0.39025883), # InternalSurface3_resin.txt
(1.2, -0.3),
(0.84600000, -1.02814653), # InternalSurface3_resin.txt
tuple(ap1l.left[1]) # AftPanel1_lower.txt
]
bounding_polygon = Polygon(points_ap1l)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_3, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_3, 'triax', label,
bounding_polygon)
# upper aft panel 2 -------------------------------------------------------
label = 'upper aft panel 2'
# create the bounding polygon
ap2u = st.aft_panel_2.layer['upper']
is4 = st.internal_surface_4.layer['resin']
sw3br = st.shear_web_3.layer['biax, right']
points_ap2u = [
(sw3br.right[0][0], 1.3),
(ap2u.right[1][0], 1.3), # AftPanel2_upper.txt
tuple(ap2u.right[1]), # AftPanel2_upper.txt
(3.65502400, 0.23368491), # InternalSurface4_resin.txt
(3.0, 0.2),
(2.46158711, 0.57115948), # InternalSurface4_resin.txt
tuple(ap2u.left[0]) # AftPanel2_upper.txt
]
bounding_polygon = Polygon(points_ap2u)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'triax', label,
bounding_polygon)
# lower aft panel 2 -------------------------------------------------------
label = 'lower aft panel 2'
# create the bounding polygon
ap2l = st.aft_panel_2.layer['lower']
is4 = st.internal_surface_4.layer['resin']
sw3br = st.shear_web_3.layer['biax, right']
points_ap2l = [
(sw3br.right[0][0], -1.6),
(ap2l.right[0][0], -1.6), # AftPanel2_lower.txt
tuple(ap2l.right[0]), # AftPanel2_lower.txt
(3.65502400, 0.04031627), # InternalSurface4_resin.txt
(3.0, 0.1),
(2.46158711, -0.34760317), # InternalSurface4_resin.txt
tuple(ap2l.left[1]) # AftPanel2_lower.txt
]
bounding_polygon = Polygon(points_ap2l)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'triax', label,
bounding_polygon)
# above shear web 1 ----------------------------------------------------------
label = 'above shear web 1'
# create the bounding polygon
points_asw1 = [
(-0.75, 2.1),
(-0.75, 0.1),
(-0.836, 0.1),
(-0.836, 2.1)
]
bounding_polygon = Polygon(points_asw1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
# below shear web 1 ----------------------------------------------------------
label = 'below shear web 1'
# create the bounding polygon
points_bsw1 = [
(-0.75, -2.1),
(-0.75, -0.1),
(-0.836, -0.1),
(-0.836, -2.1)
]
bounding_polygon = Polygon(points_bsw1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
# above shear web 2 ----------------------------------------------------------
label = 'above shear web 2'
# create the bounding polygon
points_asw2 = [
(0.75, 2.1),
(0.75, 0.1),
(0.836, 0.1),
(0.836, 2.1)
]
bounding_polygon = Polygon(points_asw2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
# below shear web 2 ----------------------------------------------------------
label = 'below shear web 2'
# create the bounding polygon
points_bsw2 = [
(0.75, -2.1),
(0.75, -0.1),
(0.836, -0.1),
(0.836, -2.1)
]
bounding_polygon = Polygon(points_bsw2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
# above shear web 3 ----------------------------------------------------------
label = 'above shear web 3'
# create the bounding polygon
sw3bl = st.shear_web_3.layer['biax, left']
points_asw3 = [
(sw3bl.left[0][0], 1.0),
(sw3bl.left[0][0], 0.1),
(sw3br.right[0][0], 0.1),
(sw3br.right[0][0], 1.0)
]
bounding_polygon = Polygon(points_asw3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
# below shear web 3 ----------------------------------------------------------
label = 'below shear web 3'
# create the bounding polygon
points_bsw3 = [
(sw3bl.left[0][0], -1.0),
(sw3bl.left[0][0], -0.1),
(sw3br.right[0][0], -0.1),
(sw3br.right[0][0], -1.0)
]
bounding_polygon = Polygon(points_bsw3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
# left of shear web 1 -------------------------------------------------------
label = 'left of shear web 1'
# create the bounding polygon
points_lsw1 = points_le[2:-2]
bounding_polygon = Polygon(points_lsw1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# right of shear web 1 -------------------------------------------------------
label = 'right of shear web 1'
# create the bounding polygon
points_rsw1 = [
points_usc[0],
points_usc[1],
(0.0, 0.0),
points_lsc[-2],
points_lsc[-1]
]
bounding_polygon = Polygon(points_rsw1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.internal_surface_2, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_2, 'triax', label,
bounding_polygon)
# left of shear web 2 -------------------------------------------------------
label = 'left of shear web 2'
# create the bounding polygon
points_lsw2 = [
points_usc[3],
points_usc[2],
(0.0, 0.0),
points_lsc[3],
points_lsc[2]
]
bounding_polygon = Polygon(points_lsw2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.internal_surface_2, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_2, 'triax', label,
bounding_polygon)
# right of shear web 2 -------------------------------------------------------
label = 'right of shear web 2'
# create the bounding polygon
points_rsw2 = [
points_ap1u[-1],
points_ap1u[-2],
(1.5, 0.0),
points_ap1l[-2],
points_ap1l[-1]
]
bounding_polygon = Polygon(points_rsw2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.internal_surface_3, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_3, 'triax', label,
bounding_polygon)
# left of shear web 3 -------------------------------------------------------
label = 'left of shear web 3'
# create the bounding polygon
points_lsw3 = [
points_ap1u[2],
points_ap1u[3],
(2.0, 0.0),
points_ap1l[3],
points_ap1l[2]
]
bounding_polygon = Polygon(points_lsw3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.internal_surface_3, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_3, 'triax', label,
bounding_polygon)
# right of shear web 3 -------------------------------------------------------
label = 'right of shear web 3'
# create the bounding polygon
points_rsw3 = [
points_ap2u[-1],
points_ap2u[-2],
(3.0, 0.0),
points_ap2l[-2],
points_ap2l[-1]
]
bounding_polygon = Polygon(points_rsw3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'triax', label,
bounding_polygon)
# show the plot
plt.show()
# write the TrueGrid input file for mesh generation ---------------------
st.write_truegrid_inputfile(
interrupt_flag=True,
additional_layers=[
st.spar_cap.layer['upper'],
st.spar_cap.layer['lower'],
st.aft_panel_1.layer['upper'],
st.aft_panel_1.layer['lower'],
st.aft_panel_2.layer['upper'],
st.aft_panel_2.layer['lower'],
st.LE_panel.layer['foam'],
st.shear_web_1.layer['biax, left'],
st.shear_web_1.layer['foam'],
st.shear_web_1.layer['biax, right'],
st.shear_web_2.layer['biax, left'],
st.shear_web_2.layer['foam'],
st.shear_web_2.layer['biax, right'],
st.shear_web_3.layer['biax, left'],
st.shear_web_3.layer['foam'],
st.shear_web_3.layer['biax, right']
],
alt_TE_reinforcement=True,
soft_warning=False)
| gpl-3.0 |
CSLDepend/raven2_sim | run.py | 1 | 23139 | '''/* Runs Raven 2 simulator by calling packet generator, Raven control software, and visualization code
* Copyright (C) 2015 University of Illinois Board of Trustees, DEPEND Research Group, Creators: Homa Alemzadeh and Daniel Chen
*
* This file is part of Raven 2 Surgical Simulator.
*
* Raven 2 Surgical Simulator is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Raven 2 Surgical Simulator is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Raven 2 Control. If not, see <http://www.gnu.org/licenses/>.
*/'''
import os
import subprocess
import random
import sys
from math import cos, sin, sqrt, acos, asin, pow as pow_f
import socket
import sys
import numpy as np
import struct
import time
import datetime
import signal
from sys import argv
import mfi
import logging
import csv
import matplotlib.pyplot as plt
import math
import time
import shelve
def rsp_func():
""" Get response from user to check if raven_home directory is correct"""
rsp = str(raw_input("Is the Raven Home found correctly (Yes/No)? "))
if rsp.lower() == 'yes' or rsp.lower() == 'y':
print 'Found Raven Home Directory.. Starting..\n'
elif rsp.lower() == 'no' or rsp.lower() == 'n':
print 'Please change the ROS_PACKAGE_PATH environment variable.\n'
sys.exit(2)
else:
rsp_func()
def initLogger(logger, log_file):
""" Initialize a logger for console and file"""
fh = logging.FileHandler(log_file)
fh.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
FORMAT = '%(asctime)s - %(message)s'
formatter = logging.Formatter(FORMAT)
ch.setFormatter(formatter)
fh.setFormatter(formatter)
logger.addHandler(ch)
logger.addHandler(fh)
logger.setLevel(logging.INFO)
class Raven():
""" Implements the Raven class to run different Raven experiments"""
def __init__(self, raven_home, mode, packet_gen, injection, trajectory):
""" Init variables """
self.mode = mode
self.packet_gen = packet_gen
self.traj = trajectory
self.raven_home = raven_home
self.shelve_file = raven_home + "/run.shelve"
self.surgeon_simulator = 1
self.defines_changed = 0
self.mfi_changed = 0
self.defines_src_file = raven_home + "/include/raven/defines.h"
self.defines_bkup_file = raven_home + "/include/raven/defines_back.h"
self.defines_chk_file = raven_home + "/include/raven/defines_last_run"
self.master_file = './selected_injection.txt'
self.inj_line = ''
self.trj_name = ''
self.defines_changed = 0
self.mfi_changed = 0
self.return_code = 0 #0 is normal, 1 is error
self.curr_inj = -1
self.rviz_enabled = 0
self.result_folder = ''
self.exp_status = '' # experiment status: 'running' or 'done'
inj = injection.split(':')
self.injection = inj[0]
self.starting_inj_num = 0
self.end_inj_num = -1
self.title = ''
if len(inj) > 1:
param = inj[1].split('-')
self.starting_inj_num = int(param[0])
if len(param) > 1:
self.end_inj_num = int(param[1])
def __change_defines_h(self):
""" Modifies <raven_home>/include/raven/defines.h """
# Change define macros
cmd = 'cp ' + self.defines_src_file + ' ' + self.defines_bkup_file
os.system(cmd)
#open files
src_fp = open(self.defines_src_file,'w')
bkup_fp = open(self.defines_bkup_file,'r')
for line in bkup_fp:
if line.startswith('//#define simulator'):
if (self.mode == 'sim' or self.mode == 'dyn_sim') and not(self.mode == 'detect'):
line = line.lstrip('//')
elif line.startswith('//#define dyn_simulator'):
if self.mode == 'dyn_sim' or self.mode == 'detect':
line = line.lstrip('//')
elif line.startswith('//#define packetgen'):
if self.packet_gen == '1':
line = line.lstrip('//')
elif line.startswith('//#define mfi'):
if self.injection == 'mfi' or self.injection == 'mfi2':
line = line.lstrip('//')
elif line.startswith('//#define detector'):
if self.mode == 'detect':
line = line.lstrip('//')
src_fp.write(line)
src_fp.close()
bkup_fp.close()
#save a check file
cmd = 'cp ' + self.defines_src_file + ' ' + self.defines_chk_file
os.system(cmd)
self.defines_changed = 1
def __restore_defines_h(self):
""" Restores <raven_home>/include/raven/defines.h """
#restore file
cmd = 'chmod 777 ' + self.defines_bkup_file;
os.system(cmd);
cmd = 'cp ' + self.defines_bkup_file + ' ' + self.defines_src_file
# delete backup
if (os.system(cmd) == 0):
cmd = 'rm ' + self.defines_bkup_file;
os.system(cmd);
self.defines_changed = 0
def __mfi_insert_code(self, file_name, mfi_hook, code):
""" Insert code to <file_name> at location <mfi_hook>"""
self.mfi_src_file = self.raven_home + "/src/raven/" + file_name
self.mfi_bkup_file = self.raven_home + "/src/raven/" + file_name + '.bkup'
self.mfi_chk_file = self.raven_home + "/src/raven/" + file_name + '.chk'
#save a backup file
cmd = 'cp ' + self.mfi_src_file + ' ' + self.mfi_bkup_file
os.system(cmd)
self.mfi_changed = 1
#open files
src_fp = open(self.mfi_src_file, 'w')
bkup_fp = open(self.mfi_bkup_file, 'r')
for line in bkup_fp:
src_fp.write(line)
if line.startswith(mfi_hook):
src_fp.write(code)
src_fp.close()
bkup_fp.close()
#save a check file
cmd = 'cp ' + self.mfi_src_file + ' ' + self.mfi_chk_file
os.system(cmd)
def __mfi_insert_code2(self, file_name, mfi_hook, trigger, target):
""" Insert code to <file_name> at location <mfi_hook>
Example: if (x > 3 && x < 5) {x = 40}
"""
trigger_line = ' && '.join(trigger)
# target[0] variable name, target[1] value
# For R matrices injected values are based on absolute values of yaw, roll, pitch
if ((target[0] == 'u.R_l') or (target[0] == 'u.R_r')):
code = 'if (' + trigger_line + ') { ';
elems = target[1].split(';');
for i in range(0,3):
for j in range(0,3):
code =code+target[0]+'['+str(i)+']['+str(j)+']='+ elems[i*3+j]+'; ';
code = code + '}\n';
# For thetas and USBs the injected value is absolute
elif (target[0].find('jpos') > -1) or (file_name.find('USB') > -1):
code = 'if (' + trigger_line + ') { ' + target[0] + ' = ' + target[1] + ';}\n'
# For position the injected value is incremental
else:
code = 'if (' + trigger_line + ') { ' + target[0] + '+= ' + target[1] + ';}\n'
self.__mfi_insert_code(file_name, mfi_hook, code)
return (file_name + ':' + mfi_hook + ':' + code)
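# Hedged example of the line generated above (trigger/target values are
# illustrative only):
#   trigger = ['u.sequence > 1000', 'u.sequence < 1100']
#   target  = ['jpos[1]', '10']  ->  if (u.sequence > 1000 && u.sequence < 1100) { jpos[1] = 10;}
# i.e. a single C statement inserted right after the matching MFI hook line,
# incremental (+=) for position targets and absolute (=) for thetas, USB
# fields and the element-wise expanded R-matrix entries.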
def __restore_mfi(self):
""" Restores the source file which changed by __mfi_insert_code()"""
#restore file
cmd = 'chmod 777 '+self.mfi_bkup_file;
os.system(cmd);
cmd = 'cp ' + self.mfi_bkup_file + ' ' + self.mfi_src_file
# delete backup
if (os.system(cmd) == 0):
cmd = 'rm ' + self.mfi_bkup_file;
os.system(cmd);
self.mfi_changed = 0
def __copy_files(self):
# Save latest_run.csv to result_folder for faultfree run
if self.injection == 'mfi2' and self.exp_status == 'done':
cmd = 'cp latest_run.csv ' + self.result_folder + '/' + \
str(self.curr_inj).zfill(4) + '.csv'
os.system(cmd)
cmd = 'cp mfi2.txt ' + self.result_folder
os.system(cmd)
cmd = 'cp mfi2_params.csv ' + self.result_folder + '/' + self.param_name
os.system(cmd)
cmd = 'cp ./robot_run.csv ' + self.result_folder + '/' + self.traj_name
#cmd = 'cp ./golden_run/traj2.csv ' + self.result_folder + '/' + self.traj_name
os.system(cmd)
def __quit(self):
""" Terminate all process started by _run_experiment() """
# Restore changes to source code
if self.defines_changed:
self.__restore_defines_h()
if self.mfi_changed:
self.__restore_mfi()
try:
r2_control_pid = subprocess.check_output("pgrep r2_control",
shell=True)
os.killpg(int(r2_control_pid), signal.SIGINT)
time.sleep(1)
except:
pass
try:
roslaunch_pid = subprocess.check_output("pgrep roslaunch",
shell=True)
os.killpg(int(roslaunch_pid), signal.SIGINT)
time.sleep(1)
except:
pass
try:
os.killpg(self.raven_proc.pid, signal.SIGINT)
time.sleep(1)
except:
pass
try:
os.killpg(self.packet_proc.pid, signal.SIGINT)
time.sleep(1)
except:
pass
try:
os.killpg(self.rostopic_proc.pid, signal.SIGINT)
time.sleep(1)
except:
pass
try:
os.killpg(self.dynSim_proc.pid, signal.SIGINT)
time.sleep(1)
except:
pass
os.system("rm /tmp/dac_fifo > /dev/null 2>&1")
os.system("rm /tmp/mpos_vel_fifo > /dev/null 2>&1")
os.system("killall roslaunch > /dev/null 2>&1")
os.system("killall rostopic > /dev/null 2>&1")
os.system("killall r2_control > /dev/null 2>&1")
os.system("killall roscore > /dev/null 2>&1")
os.system("killall rosmaster > /dev/null 2>&1")
if self.rviz_enabled:
os.system("killall rviz > /dev/null 2>&1")
os.system("killall xterm > /dev/null 2>&1")
os.system("killall two_arm_dyn > /dev/null 2>&1")
#os.system("killall python") # Doesn't work with run_mfi_experiment()
def _compile_raven(self):
""" Compile Raven source code """
self.__change_defines_h()
# Make the file
print "Compiling Raven...logged to compile.output."
cmd = 'cd ' + self.raven_home + ';make -j 1> compile.output 2>&1'
make_ret = os.system(cmd)
if self.defines_changed:
self.__restore_defines_h()
if self.mfi_changed:
self.__restore_mfi()
if (make_ret != 0):
print "Make Error: Compilation Failed..\n"
self.__quit()
sys.exit(0)
def _run_experiment(self):
""" Run Raven experiment once. """
# Experiment status
self.exp_status = 'running'
# Open Sockets
UDP_IP = "127.0.0.1"
UDP_PORT = 34000
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.bind((UDP_IP,UDP_PORT))
# Setup Variables
ravenTask = "xterm -hold -e 'roslaunch raven_2 raven_2.launch'"
#ravenTask = "xterm -hold -e 'LD_PRELOAD=/home/raven/homa_wksp/malicious_wrapper/malicious_wrapper.so roslaunch raven_2 raven_2.launch'"
visTask = 'xterm -e roslaunch raven_visualization raven_visualization.launch'
pubTask = 'roslaunch raven_visualization raven_state_publisher.launch'
dynSimTask = 'xterm -e "cd ./Li_DYN && make -j && ./two_arm_dyn"'
rostopicTask = 'rostopic echo -p ravenstate >'+self.raven_home+'/latest_run.csv'
if (self.surgeon_simulator == 1):
packetTask = 'xterm -e python '+self.raven_home+'/Real_Packet_Generator_Surgeon.py '+ self.mode + ' '+ self.traj
#print(packetTask)
else:
packetTask = 'xterm -e python '+self.raven_home+'/Packet_Generator.py'
# Call publisher, visualization, packet generator, and Raven II software
if self.rviz_enabled:
vis_proc = subprocess.Popen(visTask, env=env, shell=True, preexec_fn=os.setsid)
time.sleep(2)
else:
pub_proc = subprocess.Popen(pubTask, env=env, shell=True, preexec_fn=os.setsid)
time.sleep(1)
if self.packet_gen == "1":
self.packet_proc = subprocess.Popen(packetTask, shell=True, preexec_fn=os.setsid)
print "Using the packet generator.."
elif self.packet_gen == "0":
print "Waiting for the GUI packets.."
else:
print usage
sys.exit(2)
self.raven_proc = subprocess.Popen(ravenTask, env=env, shell=True, preexec_fn=os.setsid)
# Call rostopic to log the data from this RAVEN into latest_run.csv
self.rostopic_proc = subprocess.Popen(rostopicTask, env=env, shell=True, preexec_fn=os.setsid)
time.sleep(0.2);
# Call Dynamic Simulator
if self.mode == "dyn_sim" or self.mode == "detect":
self.dynSim_proc = subprocess.Popen(dynSimTask, env=env, shell=True, preexec_fn=os.setsid)
#os.system("cd ./Li_DYN && make -j && ./two_arm_dyn")
print "Started the dynamic simulator.."
print("Press Ctrl+C to exit.")
#Wait for a response from the robot
data = ''
while not data:
print("Waiting for Raven to be done...")
data = sock.recvfrom(100)
if data[0].find('Done!') > -1:
print("Raven is done, shutdown everything...")
self.return_code = 0
elif data[0].find('Stopped') > -1:
print("Raven is E-stopped, shutdown everything...")
self.return_code = 1
else:
data = ''
self.exp_status = 'done'
self.__quit()
def _run_mfi_experiment(self):
""" Run mfi experiment according to the master_file """
cur_inj = -1
saved_param = []
with open(self.master_file) as fp:
target_file = ''
mfi_hook = ''
trigger = []
target = []
for line in fp:
# Strip '\n' from each line then split by ','
line = line.strip('\n')
param = line.split(',')
# Skip lines begin with # or empty line
if param[0] == '' or param[0] == '#':
continue
# Read location info
elif param[0] == 'location':
location_info = param[1].split(':')
target_file = location_info[0].lstrip()
mfi_hook = location_info[1]
# Read trigger info
elif param[0] == 'trigger':
param.pop(0)
trigger = [item.strip() for item in param]
elif param[0] == 'target_r':
param.pop(0)
saved_param = param
target = (mfi.generate_target_r(saved_param)).split(' ')
elif param[0] == 'injection':
if cur_inj != int(param[1]):
cur_inj = int(param[1])
print("mfi: setup injection %d" % cur_inj)
else:
# Injection starts at argv[1]
# Example starting_inj_num is 3.2
if int(param[1]) >= self.starting_inj_num:
# If param == 3, indicate do random injection param[2] times.
if len(param) == 3:
for x in xrange(int(param[2])):
#target = (mfi.generate_target_r(saved_param)).split(' ')
target = (mfi.generate_target_r_stratified(saved_param, int(param[2]), x)).split(' ')
inj_info = self.__mfi_insert_code2(target_file, mfi_hook, trigger, target)
logger.info("injecting to %d.%d %s" % (cur_inj, x, inj_info))
self._compile_raven()
self._run_experiment()
self._run_plot()
else:
inj_info = self.__mfi_insert_code2(target_file, mfi_hook, trigger, target)
logger.info("injecting to %d %s" % (cur_inj, inj_info))
self._compile_raven()
self._run_experiment()
self._run_plot()
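# Hedged sketch of the master-file format as inferred from the parser above
# (file names, hook strings and values are placeholders):
#
#   # lines starting with '#' and blank lines are skipped
#   location, network_layer.cpp://MFI_HOOK
#   trigger, u.sequence > 1000, u.sequence < 1100
#   target_r, <parameters consumed by mfi.generate_target_r()>
#   injection, 1          <- selects injection number 1
#   injection, 1, 10      <- optional third field: run 10 stratified variants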
def __setup_result_folder(self):
"""Create a folder to store the mfi2_experiment results."""
time = datetime.datetime.now().strftime("%Y%m%d_%H%M")
if self.title == 'mfi2_empty_test':
self.result_folder = self.raven_home + '/exp_result/fault_free/' + self.title.rstrip() + '_' + time+'_'+self.mode
else:
self.result_folder = self.raven_home + '/exp_result/faulty/' + self.title.rstrip() + '_' + time+'_'+self.mode
self.traj_name = time + '.trj'
self.param_name = time+ '.param'
cmd = 'mkdir -p ' + self.result_folder
os.system(cmd)
def __run_parse_plot(self, mode, inj_num,traj):
"""Call parse_plot.py and copy figures folder to exp_result folder."""
print "copy figures" #dchen8
duration = 0
value = 0.0
with open('mfi2_params.csv', 'r') as infile:
reader = csv.reader(infile)
for line in reader:
if int(line[0]) == inj_num:
duration = line[3]
value = line[4]
break
os.system("python parse_plot.py " + str(mode) + " " + str(inj_num) + " "+str(traj))
cmd = 'cp -r figures/ ' + self.result_folder + '/inj' + '_'.join([str(inj_num), duration, value])
os.system(cmd)
def _need_compile(self, param):
myshelve = shelve.open(self.shelve_file)
if 'param' in myshelve:
if myshelve['param'] == param:
print "match!!!"
return False
else:
myshelve['param'] = param
return True
else:
myshelve['param'] = param
return True
    def _run_mfi2_experiment(self):
        """ New mfi injection using the file generated by generate_mfi2.py"""
        try:
            os.remove(self.shelve_file)
        except OSError:
            pass
code_file = 'mfi2.txt'
file_name = ''
mfi_hook = ''
with open(code_file, 'r') as infile:
""" Example lines:
location:network_layer.cpp://MFI_HOOK
injection 1:if(u.sequence>1000 && u.sequence<1100) {u.del[0]=100;}
"""
for line in infile:
self.inj_line = line;
l = line.split(':')
if l[0].startswith('injection'):
curr_inj = int(l[0].split(' ')[1])
self.curr_inj = curr_inj
if curr_inj >= self.starting_inj_num:
self.__mfi_insert_code(file_name, mfi_hook, l[1])
logger.info(line)
if self._need_compile(l[1]):
self._compile_raven()
else:
# Skip compile for faultfree runs
print "skip compiling!!!"
time.sleep(7)
self._run_experiment()
if self.title == 'mfi2_empty_test':
self.__run_parse_plot(0, self.curr_inj,self.traj)
else:
self.__run_parse_plot(1, self.curr_inj,self.traj)
self.__copy_files()
if self.curr_inj == self.end_inj_num:
break
elif l[0].startswith('location'):
file_name = l[1]
mfi_hook = l[2]
logger.info("Location: %s:%s" % (file_name, mfi_hook))
elif l[0].startswith('title'):
self.title = l[1].rstrip()
logger.info("Experiment Title: " + self.title)
self.__setup_result_folder()
# Delete if result_folder is empty
try:
os.rmdir(self.result_folder)
except OSError as ex:
pass
def signal_handler(self, signal, frame):
""" Signal handler to catch Ctrl+C to shutdown everything"""
print "Ctrl+C Pressed!"
self.__quit()
sys.exit(0)
def run(self):
""" Run Raven experiments """
if self.injection == 'mfi':
self._run_mfi_experiment()
elif self.injection == 'mfi2':
self._run_mfi2_experiment()
else:
self._compile_raven() #comment out any time you change mode from rob to sim
self._run_experiment()
#self.__run_parse_plot(0,-1)
os.system("python parse_plot.py 0 -1 "+self.traj)
# Main code starts here
# Init Logger
logger = logging.getLogger(__name__)
initLogger(logger, 'mfi.log')
# Get raven_home directory
env = os.environ.copy()
splits = env['ROS_PACKAGE_PATH'].split(':')
raven_home = splits[0]
golden_home = raven_home+'/golden_run'
print '\nRaven Home Found to be: '+ raven_home
#rsp_func()
usage = "Usage: python run.py <sim|dyn_sim|rob|detect}> <1:packet_gen|0:gui> <none|mfi:start#|mfi2:start#> <traj2|traj3>"
# Parse the arguments
try:
script, mode, packet_gen, injection, trajectory = argv
except:
print "Error: missing parameters"
print usage
sys.exit(2)
if mode == "sim":
print "Run Simulation"
elif mode == "dyn_sim":
print "Run Dynamic Simulation"
elif mode == "rob":
print "Run Real Robot"
elif mode == "detect":
print "Run Real Robot with Dynamic Model Detector"
else:
print usage
sys.exit(2)
# Init Raven
raven = Raven(raven_home, mode, packet_gen, injection, trajectory)
signal.signal(signal.SIGINT, raven.signal_handler)
# Run Raven
raven.run()
| lgpl-3.0 |
xhaju/lab-nanny | test/animate_live.py | 1 | 5562 | """
Animate live:
Plotting utility for SerialCommManager.
The fast plotting is done by only updating the changing parts of the plot
(see bastibe.de/2013-05-30-speeding-up-matplotlib.html )
Author: David Paredes
2016
"""
from communications import SerialCommManager as SCM
#from communications import SerialDataFetcher as SCM
import matplotlib.pyplot as plt
import numpy as np
import time
from collections import deque
import matplotlib.animation as animation
from serial.serialutil import SerialException
plt.ion()
NBITRESOLUTION = 10
class AnimationPlot:
""" adapted from http://electronut.in/plotting-real-time-data-from-arduino-using-python/
"""
def __init__(self, maxLen=1000,number_of_queues=8):
# open serial port
self.plotter = SCM.SerialCommManager(0.01, verbose=False)
self.number_of_queues = number_of_queues
self.y_buffers = [deque([0.0]*maxLen) for element in range(self.number_of_queues)]
self.ax = deque([0.0]*maxLen)
self.maxLen = maxLen
def addToBuf(self, buf, val):
""" Adds a value to a buffer
If the buffer is full (number of elements> self.maxLen) then we "pop" a value from the list,
to keep the buffer size constant.
:param buf:
:param val:
:return:
"""
if len(buf) < self.maxLen:
buf.append(val)
else:
buf.pop()
buf.appendleft(val)
def add(self, data):
""" Adds data to the plotting object.
        The x-axis data is the first element, and the y-axis elements for the different channels are the rest.
:param data:
:return:
"""
#assert(len(data) == 9)
self.addToBuf(self.ax, data[0])
for ii in range(self.number_of_queues):
self.addToBuf(self.y_buffers[ii],data[ii+1])
def acquire_data(self):
""" Acquires the data from the self.plotter SerialDataFetcher instance.
The data is put into the buffers self.y_buffers using the self.add function
:return:
"""
try:
#st = time.clock()
t, (C0, C1,C2,C3,C4,C5,C6,C7) = self.plotter.poll_arduino()
#print 'Time spent reading {}'.format(time.clock()-st)
if(len([t,C0,C1,C2,C3,C4,C5,C6,C7]) == 9):
self.add([0,C0,C1,C2,C3,
C4,C5,C6,C7])
#print 'Time spent updating {}'.format(time.clock()-st)
except KeyboardInterrupt:
print('exiting?')
return t
def update_plot(self,lines,axes,fig):
""" Method to update only the lines and the background patch of the plots.
This code speeds up the plotting.
:param lines:
:param axes:
:param fig:
:return:
"""
for ii,(line,ax) in enumerate(zip(lines, axes)):
line.set_data(range(self.maxLen), self.y_buffers[ii])
ax.draw_artist(ax.patch)
ax.draw_artist(line)
fig.canvas.update()
fig.canvas.flush_events()
def error_plot(self,lines,axes,fig,ON=True):
""" When an error occurs, change the color of the lines to red.
:param lines:
:param axes:
:param fig:
:param ON: Entering the error changes the lines to red. When ON=False (exiting), changes them back to black
:return:
"""
if ON:
color='r'
else:
color='k'
for ii,(line,ax) in enumerate(zip(lines, axes)):
line.set_data(range(self.maxLen), self.y_buffers[ii])
line.set_color(color)
ax.draw_artist(ax.patch)
ax.draw_artist(line)
fig.canvas.update()
fig.canvas.flush_events()
def update(self,frameNum,lines,axes,fig):
self.acquire_data()
self.update_plot(lines,axes,fig)
def initialise_plot(ax,style):
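    # Create an empty Line2D (animated=True keeps it out of normal draws); its data is
    # filled in frame by frame by AnimationPlot.update_plot.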
return ax.plot([],[],style,animated=True)[0]
def main():
""" Get data logger data and plot it. Convert raw 12-bit ADC data to voltage """
st2 = time.clock()
maxLen=100
number_acquisitions=2
animationPlot = AnimationPlot(maxLen)
plt.show(block=False)
fig, axes = plt.subplots(ncols=4,nrows=2,sharey=True, sharex=True,figsize=(15,6))
axes = [item for sublist in axes for item in sublist]
fig.show()
fig.canvas.draw()
styles = ['k','k','k','k','k','k','k','k']
lines = [initialise_plot(ax,style) for ax,style in zip(axes,styles)]
fig.canvas.draw()
init_time = animationPlot.update(0,lines,axes,fig)
#Calibrate axis
plt.title('calibrating')
plt.title('live')
for number,ax in enumerate(axes):
ax.set_ylim(0,2**NBITRESOLUTION-1)
ax.set_xlim(0,maxLen)
label = 'A{0:d} (V)'.format(number)
ax.set_ylabel(label)
#myText=axes[7].text(maxLen/2,1,'Some')
fig.canvas.draw()
st = 0
fps=0
while True:
try: #Maybe put this "try" in the update function? I don't know how it would deal with events.
st = time.clock()
animationPlot.update(0,lines,axes,fig)
# myText.set_text('{}'.format(fps))
fps = 1.0/(time.clock()-st)
print 'fps {}'.format(fps)
except SerialException:
animationPlot.error_plot(lines,axes,fig,True)
print 'Lost connection on {}, curr time {}'.format(st, time.clock())
time.sleep(1)
animationPlot.error_plot(lines,axes,fig,False)
print('exiting.')
if __name__ == '__main__':
main() | mit |
nikitasingh981/scikit-learn | sklearn/metrics/__init__.py | 28 | 3604 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import fowlkes_mallows_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import calinski_harabaz_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import mean_squared_log_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'mean_squared_log_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
    'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
| bsd-3-clause |
neogi/machine-learning | classification/logistic_regression/test_logistic_regression_mle_sg.py | 1 | 6266 | # Imports
import sframe
import numpy as np
import logistic_regression_mle_sg as lr_mle_sg
import matplotlib.pyplot as plt
# Function to plot likelihood curves
def make_plot(log_likelihood_all, len_data, batch_size, smoothing_window=1, label=''):
plt.rcParams.update({'figure.figsize': (9,5)})
log_likelihood_all_ma = np.convolve(np.array(log_likelihood_all), \
np.ones((smoothing_window,))/smoothing_window, mode='valid')
plt.plot(np.array(range(smoothing_window-1, len(log_likelihood_all)))*float(batch_size)/len_data,
log_likelihood_all_ma, linewidth=4.0, label=label)
plt.rcParams.update({'font.size': 16})
plt.tight_layout()
plt.xlabel('# of passes over data')
plt.ylabel('Average log likelihood per data point')
plt.legend(loc='lower right', prop={'size':14})
# Read data
products = sframe.SFrame('../data/Week02/amazon_baby_subset.gl/')
# Set of important words; these will be the features
list_of_words = ["baby", "one", "great", "love", "use", "would", "like", "easy", "little", "seat", "old", "well", "get", "also", "really", "son", "time", "bought", "product", "good", "daughter", "much", "loves", "stroller", "put", "months", "car", "still", "back", "used", "recommend", "first", "even", "perfect", "nice", "bag", "two", "using", "got", "fit", "around", "diaper", "enough", "month", "price", "go", "could", "soft", "since", "buy", "room", "works", "made", "child", "keep", "size", "small", "need", "year", "big", "make", "take", "easily", "think", "crib", "clean", "way", "quality", "thing", "better", "without", "set", "new", "every", "cute", "best", "bottles", "work", "purchased", "right", "lot", "side", "happy", "comfortable", "toy", "able", "kids", "bit", "night", "long", "fits", "see", "us", "another", "play", "day", "money", "monitor", "tried", "thought", "never", "item", "hard", "plastic", "however", "disappointed", "reviews", "something", "going", "pump", "bottle", "cup", "waste", "return", "amazon", "different", "top", "want", "problem", "know", "water", "try", "received", "sure", "times", "chair", "find", "hold", "gate", "open", "bottom", "away", "actually", "cheap", "worked", "getting", "ordered", "came", "milk", "bad", "part", "worth", "found", "cover", "many", "design", "looking", "weeks", "say", "wanted", "look", "place", "purchase", "looks", "second", "piece", "box", "pretty", "trying", "difficult", "together", "though", "give", "started", "anything", "last", "company", "come", "returned", "maybe", "took", "broke", "makes", "stay", "instead", "idea", "head", "said", "less", "went", "working", "high", "unit", "seems", "picture", "completely", "wish", "buying", "babies", "won", "tub", "almost", "either"]
# The label
label = ['sentiment']
# Remove punctuations
products['review_clean'] = products['review'].apply(lr_mle_sg.remove_punctuation)
# For each important word add a new column and determine count of that word in all reviews
for word in list_of_words:
products[word] = products['review_clean'].apply(lambda x: x.split().count(word))
train_data, validation_data = products.random_split(.9, seed=1)
# Obtain train and validation matrices and label arrays from the corresponding SFrames
(train_matrix, train_label) = lr_mle_sg.get_data(train_data, list_of_words, label)
train_label = train_label[:, 0]
(validation_matrix, validation_label) = lr_mle_sg.get_data(validation_data, list_of_words, label)
validation_label = validation_label[:, 0]
# Evaluate logistic regression using stochastic gradient ascent
initial_coefficients = np.zeros(194)
step_size = 5.0e-1
batch_size = 1
max_iter = 10
(coefficients, log_likelihood_all) = lr_mle_sg.logistic_regression_SG(train_matrix, train_label, initial_coefficients, step_size, batch_size, max_iter)
initial_coefficients = np.zeros(194)
step_size = 5.0e-1
batch_size = len(train_matrix)
max_iter = 200
(coefficients, log_likelihood_all) = lr_mle_sg.logistic_regression_SG(train_matrix, train_label, initial_coefficients, step_size, batch_size, max_iter)
initial_coefficients = np.zeros(194)
step_size = 1.0e-1
batch_size = 100
num_passes = 10
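# one "pass" is a full sweep (epoch) over the training set, so the number of
# stochastic-gradient iterations is passes * (minibatches per epoch)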
num_iter = num_passes * int(len(train_matrix)/batch_size)
(coefficients_sgd, log_likelihood_sgd) = lr_mle_sg.logistic_regression_SG(train_matrix, train_label, initial_coefficients, step_size, batch_size, num_iter)
make_plot(log_likelihood_sgd, len_data=len(train_matrix), batch_size=100, label='stochastic gradient, step_size=1e-1')
make_plot(log_likelihood_sgd, len_data=len(train_matrix), batch_size=100, smoothing_window=30, label='stochastic gradient, step_size=1e-1')
# Batch size less than number of training samples
initial_coefficients = np.zeros(194)
step_size = 1.0e-1
batch_size = 100
num_passes = 200
num_iter = num_passes * int(len(train_matrix)/batch_size)
(coefficients_sgd, log_likelihood_sgd) = lr_mle_sg.logistic_regression_SG(train_matrix, train_label, initial_coefficients, step_size, batch_size, num_iter)
make_plot(log_likelihood_sgd, len_data=len(train_matrix), batch_size=100, smoothing_window=30, label='stochastic, step_size=1e-1')
# Batch size equal to number of training samples
initial_coefficients = np.zeros(194)
step_size = 5.0e-1
batch_size = len(train_matrix)
num_passes = 200
num_iter = num_passes * int(len(train_matrix)/batch_size)
(coefficients_batch, log_likelihood_batch) = lr_mle_sg.logistic_regression_SG(train_matrix, train_label, initial_coefficients, step_size, batch_size, num_iter)
make_plot(log_likelihood_batch, len_data=len(train_matrix), batch_size=len(train_matrix), smoothing_window=1, label='batch, step_size=5e-1')
# Evaluate effect of step size on stochastic gradient ascent
initial_coefficients = np.zeros(194)
batch_size = 100
num_passes = 10
num_iter = num_passes * int(len(train_matrix)/batch_size)
coefficients_sgd = {}
log_likelihood_sgd = {}
for step_size in np.logspace(-4, 2, num=7):
(coefficients_sgd[step_size], log_likelihood_sgd[step_size]) = lr_mle_sg.logistic_regression_SG(train_matrix, train_label, initial_coefficients, step_size, batch_size, num_iter)
for step_size in np.logspace(-4, 2, num=7)[0:6]:
make_plot(log_likelihood_sgd[step_size], len_data=len(train_data), batch_size=100, smoothing_window=30, label='step_size=%.1e'%step_size)
| gpl-3.0 |
Intel-Corporation/tensorflow | tensorflow/lite/experimental/micro/examples/micro_speech/apollo3/captured_data_to_wav.py | 11 | 1442 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts values pulled from the microcontroller into audio files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import struct
# import matplotlib.pyplot as plt
import numpy as np
import soundfile as sf
def new_data_to_array(fn):
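  # The capture is a hex dump: skip the header line, collect the hex values, then
  # reinterpret the byte stream as little-endian 16-bit samples.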
vals = []
with open(fn) as f:
for n, line in enumerate(f):
if n != 0:
vals.extend([int(v, 16) for v in line.split()])
b = ''.join(map(chr, vals))
y = struct.unpack('<' + 'h' * int(len(b) / 2), b)
return y
data = 'captured_data.txt'
values = np.array(new_data_to_array(data)).astype(float)
# plt.plot(values, 'o-')
# plt.show(block=False)
wav = values / np.max(np.abs(values))
sf.write('captured_data.wav', wav, 16000)
| apache-2.0 |
islanderz/paparazzi | sw/tools/calibration/calibration_utils.py | 11 | 12542 |
# Copyright (C) 2010 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import print_function, division
import re
import numpy as np
from numpy import sin, cos
from scipy import linalg, stats
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def get_ids_in_log(filename):
"""Returns available ac_id from a log."""
f = open(filename, 'r')
ids = []
pattern = re.compile("\S+ (\S+)")
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
ac_id = m.group(1)
if not ac_id in ids:
ids.append(ac_id)
return ids
def read_log(ac_id, filename, sensor):
"""Extracts raw sensor measurements from a log."""
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_"+sensor+"_RAW (\S+) (\S+) (\S+)")
list_meas = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4))])
return np.array(list_meas)
def read_log_scaled(ac_id, filename, sensor, t_start, t_end):
"""Extracts scaled sensor measurements from a log."""
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_"+sensor+"_SCALED (\S+) (\S+) (\S+)")
list_meas = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
if (float(m.group(1)) >= float(t_start)) and (float(m.group(1)) < (float(t_end)+1.0)):
list_meas.append([float(m.group(1)), float(m.group(2)), float(m.group(3)), float(m.group(4))])
return np.array(list_meas)
def read_log_mag_current(ac_id, filename):
"""Extracts raw magnetometer and current measurements from a log."""
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_MAG_CURRENT_CALIBRATION (\S+) (\S+) (\S+) (\S+)")
list_meas = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4)), float(m.group(5))])
return np.array(list_meas)
def filter_meas(meas, window_size, noise_threshold):
"""Select only non-noisy data."""
filtered_meas = []
filtered_idx = []
for i in range(window_size, len(meas)-window_size):
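        # the standard deviation over a sliding window serves as a noise estimate; only
        # samples recorded while the sensor was held (nearly) still are kept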
noise = meas[i-window_size:i+window_size, :].std(axis=0)
if linalg.norm(noise) < noise_threshold:
filtered_meas.append(meas[i, :])
filtered_idx.append(i)
return np.array(filtered_meas), filtered_idx
def get_min_max_guess(meas, scale):
"""Initial boundary based calibration."""
max_meas = meas[:, :].max(axis=0)
min_meas = meas[:, :].min(axis=0)
range = max_meas - min_meas
# check if we would get division by zero
if range.all():
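        # neutral point n is the midpoint of the raw range; sensitivity sf maps that
        # range onto [-scale, +scale]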
n = (max_meas + min_meas) / 2
sf = 2*scale/range
return np.array([n[0], n[1], n[2], sf[0], sf[1], sf[2]])
else:
        return np.array([0, 0, 0, 0, 0, 0])
def scale_measurements(meas, p):
"""Scale the set of measurements."""
l_comp = []
l_norm = []
for m in meas[:, ]:
sm = (m - p[0:3])*p[3:6]
l_comp.append(sm)
l_norm.append(linalg.norm(sm))
return np.array(l_comp), np.array(l_norm)
def estimate_mag_current_relation(meas):
"""Calculate linear coefficient of magnetometer-current relation."""
coefficient = []
for i in range(0, 3):
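        # per-axis least-squares fit of the raw magnetometer reading against the measured current draw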
gradient, intercept, r_value, p_value, std_err = stats.linregress(meas[:, 3], meas[:, i])
coefficient.append(gradient)
return coefficient
def print_xml(p, sensor, res):
"""Print xml for airframe file."""
print("")
print("<define name=\""+sensor+"_X_NEUTRAL\" value=\""+str(int(round(p[0])))+"\"/>")
print("<define name=\""+sensor+"_Y_NEUTRAL\" value=\""+str(int(round(p[1])))+"\"/>")
print("<define name=\""+sensor+"_Z_NEUTRAL\" value=\""+str(int(round(p[2])))+"\"/>")
print("<define name=\""+sensor+"_X_SENS\" value=\""+str(p[3]*2**res)+"\" integer=\"16\"/>")
print("<define name=\""+sensor+"_Y_SENS\" value=\""+str(p[4]*2**res)+"\" integer=\"16\"/>")
print("<define name=\""+sensor+"_Z_SENS\" value=\""+str(p[5]*2**res)+"\" integer=\"16\"/>")
print("")
def print_imu_scaled(sensor, measurements, attrs):
print("")
print(sensor+" : Time Range("+str(measurements[:,0].min(axis=0))+" : "+str(measurements[:,0].max(axis=0))+")")
np.set_printoptions(formatter={'float': '{:-7.3f}'.format})
print(" " + attrs[2] + " " + attrs[3] + " " + attrs[4])
print("Min " + str(measurements[:,1:].min(axis=0)*attrs[0]) + " " + attrs[1])
print("Max " + str(measurements[:,1:].max(axis=0)*attrs[0]) + " " + attrs[1])
print("Mean " + str(measurements[:,1:].mean(axis=0)*attrs[0]) + " " + attrs[1])
print("StDev " + str(measurements[:,1:].std(axis=0)*attrs[0]) + " " + attrs[1])
def plot_results(sensor, measurements, flt_idx, flt_meas, cp0, np0, cp1, np1, sensor_ref, blocking=True):
"""Plot calibration results."""
# plot raw measurements with filtered ones marked as red circles
plt.subplot(3, 1, 1)
plt.plot(measurements[:, 0])
plt.plot(measurements[:, 1])
plt.plot(measurements[:, 2])
plt.plot(flt_idx, flt_meas[:, 0], 'ro')
plt.plot(flt_idx, flt_meas[:, 1], 'ro')
plt.plot(flt_idx, flt_meas[:, 2], 'ro')
plt.ylabel('ADC')
plt.title('Raw '+sensor+', red dots are actually used measurements')
plt.tight_layout()
# show scaled measurements with initial guess
plt.subplot(3, 2, 3)
plt.plot(cp0[:, 0])
plt.plot(cp0[:, 1])
plt.plot(cp0[:, 2])
plt.plot(-sensor_ref*np.ones(len(flt_meas)))
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('scaled '+sensor+' (initial guess)')
plt.xticks([])
plt.subplot(3, 2, 4)
plt.plot(np0)
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('norm of '+sensor+' (initial guess)')
plt.xticks([])
# show scaled measurements after optimization
plt.subplot(3, 2, 5)
plt.plot(cp1[:, 0])
plt.plot(cp1[:, 1])
plt.plot(cp1[:, 2])
plt.plot(-sensor_ref*np.ones(len(flt_meas)))
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('scaled '+sensor+' (optimized)')
plt.xticks([])
plt.subplot(3, 2, 6)
plt.plot(np1)
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('norm of '+sensor+' (optimized)')
plt.xticks([])
# if we want to have another plot we only draw the figure (non-blocking)
# also in matplotlib before 1.0.0 there is only one call to show possible
if blocking:
plt.show()
else:
plt.draw()
def plot_imu_scaled(sensor, measurements, attrs):
"""Plot imu scaled results."""
plt.figure("Sensor Scaled")
plt.subplot(4, 1, 1)
plt.plot(measurements[:, 0], measurements[:, 1]*attrs[0])
plt.plot(measurements[:, 0], measurements[:, 2]*attrs[0])
plt.plot(measurements[:, 0], measurements[:, 3]*attrs[0])
#plt.xlabel('Time (s)')
plt.ylabel(attrs[1])
plt.title(sensor)
plt.subplot(4, 1, 2)
plt.plot(measurements[:, 0], measurements[:, 1]*attrs[0], 'b')
#plt.xlabel('Time (s)')
plt.ylabel(attrs[2])
plt.subplot(4, 1, 3)
plt.plot(measurements[:, 0], measurements[:, 2]*attrs[0], 'g')
#plt.xlabel('Time (s)')
plt.ylabel(attrs[3])
plt.subplot(4, 1, 4)
plt.plot(measurements[:, 0], measurements[:, 3]*attrs[0], 'r')
plt.xlabel('Time (s)')
plt.ylabel(attrs[4])
plt.show()
def plot_imu_scaled_fft(sensor, measurements, attrs):
"""Plot imu scaled fft results."""
#dt = 0.0769
#Fs = 1/dt
Fs = 26.0
plt.figure("Sensor Scaled - FFT")
plt.subplot(3, 1, 1)
plt.magnitude_spectrum(measurements[:, 1]*attrs[0], Fs=Fs, scale='linear')
plt.ylabel(attrs[2])
plt.title(sensor)
plt.subplot(3, 1, 2)
plt.magnitude_spectrum(measurements[:, 2]*attrs[0], Fs=Fs, scale='linear')
plt.ylabel(attrs[3])
plt.subplot(3, 1, 3)
plt.magnitude_spectrum(measurements[:, 3]*attrs[0], Fs=Fs, scale='linear')
plt.xlabel('Frequency')
plt.ylabel(attrs[4])
plt.show()
def plot_mag_3d(measured, calibrated, p):
"""Plot magnetometer measurements on 3D sphere."""
# set up points for sphere and ellipsoid wireframes
u = np.r_[0:2 * np.pi:20j]
v = np.r_[0:np.pi:20j]
wx = np.outer(cos(u), sin(v))
wy = np.outer(sin(u), sin(v))
wz = np.outer(np.ones(np.size(u)), cos(v))
ex = p[0] * np.ones(np.size(u)) + np.outer(cos(u), sin(v)) / p[3]
ey = p[1] * np.ones(np.size(u)) + np.outer(sin(u), sin(v)) / p[4]
ez = p[2] * np.ones(np.size(u)) + np.outer(np.ones(np.size(u)), cos(v)) / p[5]
# measurements
mx = measured[:, 0]
my = measured[:, 1]
mz = measured[:, 2]
# calibrated values
cx = calibrated[:, 0]
cy = calibrated[:, 1]
cz = calibrated[:, 2]
# axes size
left = 0.02
bottom = 0.05
width = 0.46
height = 0.9
rect_l = [left, bottom, width, height]
rect_r = [left/2+0.5, bottom, width, height]
fig = plt.figure(figsize=plt.figaspect(0.5))
if matplotlib.__version__.startswith('0'):
ax = Axes3D(fig, rect=rect_l)
else:
ax = fig.add_subplot(1, 2, 1, position=rect_l, projection='3d')
# plot measurements
ax.scatter(mx, my, mz)
plt.hold(True)
# plot line from center to ellipsoid center
ax.plot([0.0, p[0]], [0.0, p[1]], [0.0, p[2]], color='black', marker='+', markersize=10)
# plot ellipsoid
ax.plot_wireframe(ex, ey, ez, color='grey', alpha=0.5)
# Create cubic bounding box to simulate equal aspect ratio
max_range = np.array([mx.max() - mx.min(), my.max() - my.min(), mz.max() - mz.min()]).max()
Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (mx.max() + mx.min())
Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (my.max() + my.min())
Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (mz.max() + mz.min())
# add the fake bounding box:
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w')
ax.set_title('MAG raw with fitted ellipsoid and center offset')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
if matplotlib.__version__.startswith('0'):
ax = Axes3D(fig, rect=rect_r)
else:
ax = fig.add_subplot(1, 2, 2, position=rect_r, projection='3d')
ax.plot_wireframe(wx, wy, wz, color='grey', alpha=0.5)
plt.hold(True)
ax.scatter(cx, cy, cz)
ax.set_title('MAG calibrated on unit sphere')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_xlim3d(-1, 1)
ax.set_ylim3d(-1, 1)
ax.set_zlim3d(-1, 1)
plt.show()
def read_turntable_log(ac_id, tt_id, filename, _min, _max):
""" Read a turntable log.
    return an array whose first column is the turntable measurement and the next 3 are gyro
"""
f = open(filename, 'r')
pattern_g = re.compile("(\S+) "+str(ac_id)+" IMU_GYRO_RAW (\S+) (\S+) (\S+)")
pattern_t = re.compile("(\S+) "+str(tt_id)+" IMU_TURNTABLE (\S+)")
last_tt = None
list_tt = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern_t, line)
if m:
last_tt = float(m.group(2))
m = re.match(pattern_g, line)
if m and last_tt and _min < last_tt < _max:
list_tt.append([last_tt, float(m.group(2)), float(m.group(3)), float(m.group(4))])
return np.array(list_tt)
| gpl-2.0 |
zharuosi/2017 | pythonNRC/plot.py | 1 | 7724 | #!/usr/bin/python2.7
#-*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib import rc
import math
import decimal
import csv
import os
#--------------------------------------------------------------------#
for alpha in range(0,5):
#alpha = 0
plt.rcParams["legend.fontsize"]=28
plt.rcParams["font.size"]=22
plt.figure(figsize=(16,12),dpi=200)
rc('font',**{'family':'serif','serif':['Times']})
rc('text', usetex=True)
if alpha > 0:
angle_type = 'P'
elif alpha < 0:
angle_type = 'N'
else:
angle_type = 'Z'
#foil_type = 'no-flat'
#foil_type = '0p5mm'
#foil_type = '0p25mm'
foil_type = '0p094mm'
data_type = 'pressure-'+foil_type
alpha100 = 100*alpha
str_alpha = str(decimal.Decimal("%.2f" % alpha))
str_alpha100 = str(alpha100)
xlimit_global = [-0.1,1.1]
ylimit_global = [-0.6,1.2]
xlimit_local = [-0.005,0.011]
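    # The y-limits and position of the zoomed inset (small-x region near the leading edge)
    # are set case by case below for each foil type and angle of attack.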
if foil_type == 'no-flat':
if alpha == 0:
ylimit_local = [-0.2,0.6]
sub_fig_location = [0.5,0.5,0.4,0.3] # [left,bottom,width,height]
elif alpha == 1:
ylimit_local = [-0.55,-0.11]
sub_fig_location = [0.5,0.5,0.4,0.3] # [left,bottom,width,height]
elif alpha == 2:
ylimit_local = [-2.5,-0.5]
sub_fig_location = [0.5,0.2,0.4,0.3] # [left,bottom,width,height]
elif alpha == 3:
ylimit_local = [-6.0,-0.5]
sub_fig_location = [0.5,0.2,0.4,0.3] # [left,bottom,width,height]
elif alpha == 4:
ylimit_local = [-10.5,-0.5]
sub_fig_location = [0.5,0.2,0.4,0.3] # [left,bottom,width,height]
elif foil_type == '0p5mm':
if alpha == 0:
ylimit_local = [-0.4,0.6]
sub_fig_location = [0.5,0.5,0.4,0.3] # [left,bottom,width,height]
elif alpha == 1:
ylimit_local = [-1.5,0.35]
sub_fig_location = [0.5,0.15,0.4,0.3] # [left,bottom,width,height]
elif alpha == 2:
ylimit_local = [-2.7,-0.5]
sub_fig_location = [0.5,0.2,0.4,0.3] # [left,bottom,width,height]
elif alpha == 3:
ylimit_local = [-6.0,-0.5]
sub_fig_location = [0.5,0.2,0.4,0.3] # [left,bottom,width,height]
elif alpha == 4:
ylimit_local = [-3.5,-0.5]
sub_fig_location = [0.5,0.2,0.4,0.3] # [left,bottom,width,height]
elif foil_type == '0p25mm':
if alpha == 0:
ylimit_local = [-0.4,0.6]
sub_fig_location = [0.5,0.5,0.4,0.3] # [left,bottom,width,height]
elif alpha == 1:
ylimit_local = [-1.5,0.35]
sub_fig_location = [0.3,0.6,0.4,0.3] # [left,bottom,width,height]
elif alpha == 2:
ylimit_local = [-2.7,-0.5]
sub_fig_location = [0.5,0.2,0.4,0.3] # [left,bottom,width,height]
elif alpha == 3:
ylimit_local = [-6.0,-0.5]
sub_fig_location = [0.5,0.2,0.4,0.3] # [left,bottom,width,height]
elif alpha == 4:
ylimit_local = [-3.5,-0.5]
sub_fig_location = [0.5,0.2,0.4,0.3] # [left,bottom,width,height]
elif foil_type == '0p094mm':
if alpha == 0:
ylimit_local = [-0.4,0.6]
sub_fig_location = [0.5,0.5,0.4,0.3] # [left,bottom,width,height]
elif alpha == 1:
ylimit_local = [-1.5,0.35]
sub_fig_location = [0.3,0.6,0.4,0.3] # [left,bottom,width,height]
elif alpha == 2:
ylimit_local = [-2.7,-0.5]
sub_fig_location = [0.5,0.2,0.4,0.3] # [left,bottom,width,height]
elif alpha == 3:
ylimit_local = [-6.0,-0.5]
sub_fig_location = [0.5,0.2,0.4,0.3] # [left,bottom,width,height]
elif alpha == 4:
ylimit_local = [-3.5,-0.5]
sub_fig_location = [0.5,0.2,0.4,0.3] # [left,bottom,width,height]
#----------------------------------Variables----------------------------------#
file_StarCCM = './afoil-'+foil_type+'/'+angle_type+str_alpha+'/8C/PressureOnFoil.csv'
file_David = './David/'+str(int(alpha))+'deg/'+data_type+'.dat'
file_output_eps = 'Cp_afoil_'+foil_type+'_'+angle_type+str_alpha100+'.eps'
file_title = 'afoil-'+foil_type+r' (camber ratio=0.014, $t=0.0416$ and $\alpha='+str(int(alpha))+r'^{\circ}$)'
file_output_eps = file_output_eps.replace('-', '_')
print file_title
print file_output_eps
#----------------------------------Variables----------------------------------#
# afoil-0p5mm-P1.00
# file_StarCCM = './afoil-0p5mm/P1.00/8C/PressureOnFoil.csv'
# file_David = './David/1deg/pressure-0p5mm.dat'
# file_output_eps = 'Cp_afoil_0p5mm_P100.eps'
# file_title = 'afoil-0p5mm (camber ratio=0.014, $t=0.0416$ and $\alpha=1^{\circ}$)'
# xlimit_global = [-0.1,1.1]
# ylimit_global = [-0.6,1.2]
# xlimit_local = [-0.02,0.08]
# ylimit_local = [-0.5,-0.25]
# #left,bottom,width,height
# sub_fig_location = [0.5,0.5,0.4,0.3]
# x coordinate
x = []
# pressure coefficient
y = []
with open(file_StarCCM) as csvfile:
#with open('./afoil-0p5mm/P1.00/8C/PressureOnFoil.csv') as csvfile:
data = csv.reader(csvfile,delimiter=',')
# ignore the first row
next(data, None)
# read data and transfer to float into the array x and y
for row in data:
x.append(float(row[0]))
y.append(float(row[1]))
# initialize a 2-D array to sort the raw data
temp = [[0 for col in range(2)] for row in range(len(x))]
for i in range(len(x)):
temp[i][0] = x[i]
temp[i][1] = y[i]
# sorting is based on the first column by default
temp.sort()
# xx and yy are used to plot
xx = []
yy = []
for i in range(len(x)):
xx.append(temp[i][0])
yy.append(temp[i][1])
plot1=plt.plot(xx,yy,'ro',markersize=7,markeredgewidth=0.5,markeredgecolor='red',markerfacecolor='white',label=r'STAR-CCM+')
data1 = np.loadtxt(file_David)
x1 = data1[:,0]
y1 = data1[:,3]
plot1=plt.plot(x1,y1,'bx',markersize=7,label=r'David results')
new_ticks=np.linspace(0,1.0,11)
plt.xticks(new_ticks)
max_Cp = max(max(yy),max(y1))
min_Cp = min(min(yy),min(y1))
print min_Cp,max_Cp
# avoid the condition when alpha==0
ylimit_global[0] = min_Cp - 0.2*max(alpha,1)
ylimit_global[1] = max_Cp + 0.2*max(alpha,1)
#plt.gca().xaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f'))
plt.xlim(xlimit_global[0],xlimit_global[1])
plt.ylim(ylimit_global[0],ylimit_global[1])
plt.xlabel (r'$x$ (m)',fontsize=28)
plt.ylabel (r'$C_p$',fontsize=28)
plt.title(file_title)
#plt.legend(loc='best',numpoints=1)
#plt.legend(loc='upper left',numpoints=1)
l=plt.legend(numpoints=1,loc='upper right')
l.get_frame().set_edgecolor('k')
plt.tight_layout()
# Subfig: position: left,bottom,width,height
rc('font',**{'family':'serif','serif':['Times'],'size':22})
a = plt.axes([sub_fig_location[0],sub_fig_location[1],sub_fig_location[2],sub_fig_location[3]],facecolor='w')
plot1=plt.plot(xx,yy,'or',markersize=5,markeredgewidth=0.5,markeredgecolor='red',markerfacecolor='white',label=r'STAR-CCM+')
plot2=plt.plot(x1,y1,'xb',markersize=5,label=r'David')
plt.xlim(xlimit_local[0],xlimit_local[1])
plt.ylim(ylimit_local[0],ylimit_local[1])
#plt.show()
#plt.grid(True)
plt.savefig(file_output_eps)
#plt.savefig("Cp_afoil_0p5mm_P100.eps")
| mit |
SchulichUAV/ardupilot | Tools/LogAnalyzer/tests/TestOptFlow.py | 26 | 14969 | from LogAnalyzer import Test,TestResult
import DataflashLog
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
class TestFlow(Test):
'''test optical flow sensor scale factor calibration'''
#
    # Use the following procedure to log the calibration data. It is assumed that the optical flow sensor has been
    # correctly aligned, is focussed and the test is performed over a textured surface with adequate lighting.
    # Note that the strobing effect from non-incandescent artificial lighting can produce poor optical flow measurements.
#
# 1) Set LOG_DISARMED and FLOW_ENABLE to 1 and verify that ATT and OF messages are being logged onboard
# 2) Place on level ground, apply power and wait for EKF to complete attitude alignment
# 3) Keeping the copter level, lift it to shoulder height and rock between +-20 and +-30 degrees
# in roll about an axis that passes through the flow sensor lens assembly. The time taken to rotate from
# maximum left roll to maximum right roll should be about 1 second.
# 4) Repeat 3) about the pitch axis
# 5) Holding the copter level, lower it to the ground and remove power
# 6) Transfer the logfile from the sdcard.
# 7) Open a terminal and cd to the ardupilot/Tools/LogAnalyzer directory
    # 8) Run the analysis by entering 'python LogAnalyzer.py <log file name including full path>'
# 9) Check the OpticalFlow test status printed to the screen. The analysis plots are saved to
# flow_calibration.pdf and the recommended scale factors to flow_calibration.param
def __init__(self):
Test.__init__(self)
self.name = "OpticalFlow"
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
def FAIL():
self.result.status = TestResult.StatusType.FAIL
def WARN():
if self.result.status != TestResult.StatusType.FAIL:
self.result.status = TestResult.StatusType.WARN
try:
# tuning parameters used by the algorithm
tilt_threshold = 15 # roll and pitch threshold used to start and stop calibration (deg)
quality_threshold = 124 # minimum flow quality required for data to be used by the curve fit (N/A)
min_rate_threshold = 0.0 # if the gyro rate is less than this, the data will not be used by the curve fit (rad/sec)
            max_rate_threshold = 2.0 # if the gyro rate is greater than this, the data will not be used by the curve fit (rad/sec)
param_std_threshold = 5.0 # maximum allowable 1-std uncertainty in scaling parameter (scale factor * 1000)
param_abs_threshold = 200 # max/min allowable scale factor parameter. Values of FLOW_FXSCALER and FLOW_FYSCALER outside the range of +-param_abs_threshold indicate a sensor configuration problem.
min_num_points = 100 # minimum number of points required for a curve fit - this is necessary, but not sufficient condition - the standard deviation estimate of the fit gradient is also important.
# get the existing scale parameters
flow_fxscaler = logdata.parameters["FLOW_FXSCALER"]
flow_fyscaler = logdata.parameters["FLOW_FYSCALER"]
# load required optical flow data
if "OF" in logdata.channels:
flowX = np.zeros(len(logdata.channels["OF"]["flowX"].listData))
for i in range(len(logdata.channels["OF"]["flowX"].listData)):
(line, flowX[i]) = logdata.channels["OF"]["flowX"].listData[i]
bodyX = np.zeros(len(logdata.channels["OF"]["bodyX"].listData))
for i in range(len(logdata.channels["OF"]["bodyX"].listData)):
(line, bodyX[i]) = logdata.channels["OF"]["bodyX"].listData[i]
flowY = np.zeros(len(logdata.channels["OF"]["flowY"].listData))
for i in range(len(logdata.channels["OF"]["flowY"].listData)):
(line, flowY[i]) = logdata.channels["OF"]["flowY"].listData[i]
bodyY = np.zeros(len(logdata.channels["OF"]["bodyY"].listData))
for i in range(len(logdata.channels["OF"]["bodyY"].listData)):
(line, bodyY[i]) = logdata.channels["OF"]["bodyY"].listData[i]
flow_time_us = np.zeros(len(logdata.channels["OF"]["TimeUS"].listData))
for i in range(len(logdata.channels["OF"]["TimeUS"].listData)):
(line, flow_time_us[i]) = logdata.channels["OF"]["TimeUS"].listData[i]
flow_qual = np.zeros(len(logdata.channels["OF"]["Qual"].listData))
for i in range(len(logdata.channels["OF"]["Qual"].listData)):
(line, flow_qual[i]) = logdata.channels["OF"]["Qual"].listData[i]
else:
FAIL()
self.result.statusMessage = "FAIL: no optical flow data\n"
return
# load required attitude data
if "ATT" in logdata.channels:
Roll = np.zeros(len(logdata.channels["ATT"]["Roll"].listData))
for i in range(len(logdata.channels["ATT"]["Roll"].listData)):
(line, Roll[i]) = logdata.channels["ATT"]["Roll"].listData[i]
Pitch = np.zeros(len(logdata.channels["ATT"]["Pitch"].listData))
for i in range(len(logdata.channels["ATT"]["Pitch"].listData)):
(line, Pitch[i]) = logdata.channels["ATT"]["Pitch"].listData[i]
att_time_us = np.zeros(len(logdata.channels["ATT"]["TimeUS"].listData))
for i in range(len(logdata.channels["ATT"]["TimeUS"].listData)):
(line, att_time_us[i]) = logdata.channels["ATT"]["TimeUS"].listData[i]
else:
FAIL()
self.result.statusMessage = "FAIL: no attitude data\n"
return
# calculate the start time for the roll calibration
startTime = int(0)
startRollIndex = int(0)
for i in range(len(Roll)):
if abs(Roll[i]) > tilt_threshold:
startTime = att_time_us[i]
break
for i in range(len(flow_time_us)):
if flow_time_us[i] > startTime:
startRollIndex = i
break
# calculate the end time for the roll calibration
endTime = int(0)
endRollIndex = int(0)
for i in range(len(Roll)-1,-1,-1):
if abs(Roll[i]) > tilt_threshold:
endTime = att_time_us[i]
break
for i in range(len(flow_time_us)-1,-1,-1):
if flow_time_us[i] < endTime:
endRollIndex = i
break
# check we have enough roll data points
if (endRollIndex - startRollIndex <= min_num_points):
FAIL()
                self.result.statusMessage = "FAIL: insufficient roll data points\n"
return
# resample roll test data excluding data before first movement and after last movement
# also exclude data where there is insufficient angular rate
flowX_resampled = []
bodyX_resampled = []
flowX_time_us_resampled = []
for i in range(len(Roll)):
if (i >= startRollIndex) and (i <= endRollIndex) and (abs(bodyX[i]) > min_rate_threshold) and (abs(bodyX[i]) < max_rate_threshold) and (flow_qual[i] > quality_threshold):
flowX_resampled.append(flowX[i])
bodyX_resampled.append(bodyX[i])
flowX_time_us_resampled.append(flow_time_us[i])
# calculate the start time for the pitch calibration
startTime = 0
startPitchIndex = int(0)
for i in range(len(Pitch)):
if abs(Pitch[i]) > tilt_threshold:
startTime = att_time_us[i]
break
for i in range(len(flow_time_us)):
if flow_time_us[i] > startTime:
startPitchIndex = i
break
# calculate the end time for the pitch calibration
endTime = 0
endPitchIndex = int(0)
for i in range(len(Pitch)-1,-1,-1):
if abs(Pitch[i]) > tilt_threshold:
endTime = att_time_us[i]
break
for i in range(len(flow_time_us)-1,-1,-1):
if flow_time_us[i] < endTime:
endPitchIndex = i
break
# check we have enough pitch data points
if (endPitchIndex - startPitchIndex <= min_num_points):
FAIL()
                self.result.statusMessage = "FAIL: insufficient pitch data points\n"
return
# resample pitch test data excluding data before first movement and after last movement
# also exclude data where there is insufficient or too much angular rate
flowY_resampled = []
bodyY_resampled = []
flowY_time_us_resampled = []
for i in range(len(Roll)):
if (i >= startPitchIndex) and (i <= endPitchIndex) and (abs(bodyY[i]) > min_rate_threshold) and (abs(bodyY[i]) < max_rate_threshold) and (flow_qual[i] > quality_threshold):
flowY_resampled.append(flowY[i])
bodyY_resampled.append(bodyY[i])
flowY_time_us_resampled.append(flow_time_us[i])
# fit a straight line to the flow vs body rate data and calculate the scale factor parameter required to achieve a slope of 1
coef_flow_x , cov_x = np.polyfit(bodyX_resampled,flowX_resampled,1,rcond=None, full=False, w=None, cov=True)
coef_flow_y , cov_y = np.polyfit(bodyY_resampled,flowY_resampled,1,rcond=None, full=False, w=None, cov=True)
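            # with cov=True, polyfit also returns the parameter covariance matrix; its [0][0]
            # element is the variance of the fitted slope and is used below as a 1-sigma
            # quality check on the fit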
            # taking the existing scale factor parameters into account, calculate the parameter values required to achieve a unity slope
flow_fxscaler_new = int(1000 * (((1 + 0.001 * float(flow_fxscaler))/coef_flow_x[0] - 1)))
flow_fyscaler_new = int(1000 * (((1 + 0.001 * float(flow_fyscaler))/coef_flow_y[0] - 1)))
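            # i.e. solve (1 + 0.001*scaler_new) = (1 + 0.001*scaler_old) / fitted_slope for
            # each axis, so that a re-run with the new scalers would give a slope of 1.0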
# Do a sanity check on the scale factor variance
if sqrt(cov_x[0][0]) > param_std_threshold or sqrt(cov_y[0][0]) > param_std_threshold:
FAIL()
self.result.statusMessage = "FAIL: inaccurate fit - poor quality or insufficient data\nFLOW_FXSCALER 1STD = %u\nFLOW_FYSCALER 1STD = %u\n" % (round(1000*sqrt(cov_x[0][0])),round(1000*sqrt(cov_y[0][0])))
# Do a sanity check on the scale factors
if abs(flow_fxscaler_new) > param_abs_threshold or abs(flow_fyscaler_new) > param_abs_threshold:
FAIL()
self.result.statusMessage = "FAIL: required scale factors are excessive\nFLOW_FXSCALER=%i\nFLOW_FYSCALER=%i\n" % (flow_fxscaler,flow_fyscaler)
# display recommended scale factors
self.result.statusMessage = "Set FLOW_FXSCALER to %i\nSet FLOW_FYSCALER to %i\n\nCal plots saved to flow_calibration.pdf\nCal parameters saved to flow_calibration.param\n\nFLOW_FXSCALER 1STD = %u\nFLOW_FYSCALER 1STD = %u\n" % (flow_fxscaler_new,flow_fyscaler_new,round(1000*sqrt(cov_x[0][0])),round(1000*sqrt(cov_y[0][0])))
# calculate fit display data
body_rate_display = [-max_rate_threshold,max_rate_threshold]
fit_coef_x = np.poly1d(coef_flow_x)
flowX_display = fit_coef_x(body_rate_display)
fit_coef_y = np.poly1d(coef_flow_y)
flowY_display = fit_coef_y(body_rate_display)
# plot and save calibration test points to PDF
from matplotlib.backends.backend_pdf import PdfPages
output_plot_filename = "flow_calibration.pdf"
pp = PdfPages(output_plot_filename)
plt.figure(1,figsize=(20,13))
plt.subplot(2,1,1)
plt.plot(bodyX_resampled,flowX_resampled,'b', linestyle=' ', marker='o',label="test points")
plt.plot(body_rate_display,flowX_display,'r',linewidth=2.5,label="linear fit")
plt.title('X axis flow rate vs gyro rate')
plt.ylabel('flow rate (rad/s)')
plt.xlabel('gyro rate (rad/sec)')
plt.grid()
plt.legend(loc='upper left')
# draw plots
plt.subplot(2,1,2)
plt.plot(bodyY_resampled,flowY_resampled,'b', linestyle=' ', marker='o',label="test points")
plt.plot(body_rate_display,flowY_display,'r',linewidth=2.5,label="linear fit")
plt.title('Y axis flow rate vs gyro rate')
plt.ylabel('flow rate (rad/s)')
plt.xlabel('gyro rate (rad/sec)')
plt.grid()
plt.legend(loc='upper left')
pp.savefig()
plt.figure(2,figsize=(20,13))
plt.subplot(2,1,1)
plt.plot(flow_time_us,flowX,'b',label="flow rate - all")
plt.plot(flow_time_us,bodyX,'r',label="gyro rate - all")
plt.plot(flowX_time_us_resampled,flowX_resampled,'c', linestyle=' ', marker='o',label="flow rate - used")
plt.plot(flowX_time_us_resampled,bodyX_resampled,'m', linestyle=' ', marker='o',label="gyro rate - used")
plt.title('X axis flow and body rate vs time')
plt.ylabel('rate (rad/s)')
plt.xlabel('time (usec)')
plt.grid()
plt.legend(loc='upper left')
# draw plots
plt.subplot(2,1,2)
plt.plot(flow_time_us,flowY,'b',label="flow rate - all")
plt.plot(flow_time_us,bodyY,'r',label="gyro rate - all")
plt.plot(flowY_time_us_resampled,flowY_resampled,'c', linestyle=' ', marker='o',label="flow rate - used")
plt.plot(flowY_time_us_resampled,bodyY_resampled,'m', linestyle=' ', marker='o',label="gyro rate - used")
plt.title('Y axis flow and body rate vs time')
plt.ylabel('rate (rad/s)')
plt.xlabel('time (usec)')
plt.grid()
plt.legend(loc='upper left')
pp.savefig()
# close the pdf file
pp.close()
# close all figures
plt.close("all")
# write correction parameters to file
test_results_filename = "flow_calibration.param"
file = open(test_results_filename,"w")
file.write("FLOW_FXSCALER"+" "+str(flow_fxscaler_new)+"\n")
file.write("FLOW_FYSCALER"+" "+str(flow_fyscaler_new)+"\n")
file.close()
except KeyError as e:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = str(e) + ' not found'
| gpl-3.0 |
paragguruji/fintechontwitter | fintechontwitter/preprocess.py | 1 | 4569 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 05 17:53:34 2017
@author: Parag
"""
import pandas
import re
import logging
from datetime import datetime, timedelta
from collections import Counter
from fintechontwitter import DATA_FILE, DATE_FORMAT, PREPROCESSED
logger = logging.getLogger('fintechontwitter')
key_sequence = ['urls', 'html_tags', 'user_mentions', 'hashtags', 'emoticons',
'numbers', 'words', 'characters', 'misc']
features = ['user_mentions', 'hashtags', 'emoticons',
'plaintext', 'words', 'rt']
regex_strings = {
"emoticons": r"""(?:[%s][%s]?[%s])""" % tuple(map(re.escape,
[r":;8X=",
r"-oO",
r"()[]\|DPpO"])),
"html_tags": r'<[^>]+>',
"user_mentions": r'(?:@[\w_]+)',
"hashtags": r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)",
"urls": r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|\
(?:%[0-9a-fA-F][0-9a-fA-F]))+",
"numbers": r'(?:(?:\d+,?)+(?:\.?\d+)?)',
"words": r"(?:[a-zA-Z0-9]+(?:[-'_]*[a-zA-Z0-9]+)+)",
"characters": r'(?:[\w])',
"misc": r'(?:[^\w ]+)'}
tokens_re = re.compile(r'(' +
r'|'.join([regex_strings[k] for k in key_sequence]) +
r')',
re.VERBOSE)
plaintext_re = re.compile(r'(' +
r'|'.join([regex_strings[k]
for k in
['numbers', 'hashtags', 'user_mentions',
'words', 'characters']]) +
r')')
regex = {key: re.compile(r'^' + regex_strings[key] + '$', re.VERBOSE)
for key in key_sequence}
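# tokens_re splits a tweet into candidate tokens (priority given by key_sequence), while
# the anchored per-key patterns in `regex` are used to classify each extracted token.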
non_decimal = re.compile(r'[^\d.]+')
def parse_followers(x):
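    # Follower counts scraped as text may contain separators or other junk; strip
    # everything that is not a digit or a decimal point. Numeric values pass through.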
if isinstance(x, int) or isinstance(x, float):
return x
else:
try:
ret = non_decimal.sub('', x)
except TypeError:
ret = ''
return ret
def parse_date(date_str):
dt = date_str[:20] + date_str[26:]
tz = date_str[20:25]
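    # the '+HHMM'/'-HHMM' UTC-offset field occupies positions 20-25 of the date string;
    # it is cut out before parsing and re-applied afterwards as a timedelta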
    return pandas.to_datetime(dt, format=DATE_FORMAT, errors='raise') + \
        timedelta(minutes=((60*int(tz[1:3]) + int(tz[3:5])) *
                           (-1 if tz[0] == '-' else 1)))
def preprocess(s):
s = s.replace('\N',
' ').replace('\n',
' ').replace('\t',
' ').replace('\T', ' ').strip()
while(' ' in s):
s = s.replace(' ', ' ')
tokens = tokens_re.findall(s)
plaintext = s
classified_tokens = {}
classified_tokens['hashtags'] = [token.upper() for token in tokens
if regex['hashtags'].search(token)]
classified_tokens['user_mentions'] = [token for token in tokens if
regex['user_mentions'].search(token)]
classified_tokens['emoticons'] = [token for token in tokens
if regex['emoticons'].search(token)]
classified_tokens['words'] = Counter([token for token in tokens
if regex['words'].search(token)])
plaintext = re.sub(regex_strings['urls'], "", plaintext)
plaintext = re.sub(regex_strings['html_tags'], "", plaintext)
plaintext = ' '.join(plaintext_re.findall(plaintext))
classified_tokens['plaintext'] = plaintext.replace('#', '')
classified_tokens['rt'] = s.startswith('RT')
return classified_tokens
def featureGen(tweet):
feature_data = preprocess(tweet['tweet'])
return pandas.Series([feature_data[key] for key in features])
def load_frame():
df = pandas.read_pickle(DATA_FILE)
logger.info("loaded " + ("processed" if PREPROCESSED else "raw") + " data")
if not PREPROCESSED:
df.loc[:, 'followers'] = \
pandas.to_numeric(df['followers'].map(parse_followers),
errors='raise',
downcast='integer')
logger.info("Follower count transformed")
df.loc[:, 'date'] = df['date'].map(parse_date)
logger.info("Dates transformed...")
logger.info("Preprocessing and generating features...")
newcols = df.apply(featureGen, axis=1)
newcols.columns = features
logger.info("Adding new features to dataframe...")
return df.join(newcols)
else:
return df
| gpl-3.0 |
Dannnno/odo | odo/backends/tests/test_sparksql.py | 2 | 6058 | from __future__ import print_function, absolute_import, division
import pytest
pyspark = pytest.importorskip('pyspark')
py4j = pytest.importorskip('py4j')
import os
import shutil
import json
import tempfile
from contextlib import contextmanager
import toolz
from toolz.compatibility import map
from pyspark.sql import Row, SchemaRDD
try:
from pyspark.sql.types import (ArrayType, StructField, StructType,
IntegerType)
from pyspark.sql.types import StringType
except ImportError:
from pyspark.sql import ArrayType, StructField, StructType, IntegerType
from pyspark.sql import StringType
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import datashape
from datashape import dshape
from odo import odo, discover, Directory, JSONLines
from odo.utils import tmpfile, ignoring
from odo.backends.sparksql import schema_to_dshape, dshape_to_schema
from odo.backends.sparksql import SparkDataFrame
data = [['Alice', 100.0, 1],
['Bob', 200.0, 2],
['Alice', 50.0, 3]]
df = pd.DataFrame(data, columns=['name', 'amount', 'id'])
@pytest.yield_fixture(scope='module')
def people(sc):
with tmpfile('.txt') as fn:
df.to_csv(fn, header=False, index=False)
raw = sc.textFile(fn)
parts = raw.map(lambda line: line.split(','))
yield parts.map(lambda person: Row(name=person[0],
amount=float(person[1]),
id=int(person[2])))
@pytest.fixture(scope='module')
def ctx(sqlctx, people):
try:
df = sqlctx.createDataFrame(people)
except AttributeError:
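        # older PySpark releases have no SQLContext.createDataFrame; fall back to the
        # legacy inferSchema / registerTempTable API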
schema = sqlctx.inferSchema(people)
schema.registerTempTable('t')
schema.registerTempTable('t2')
else:
df2 = sqlctx.createDataFrame(people)
sqlctx.registerDataFrameAsTable(df, 't')
sqlctx.registerDataFrameAsTable(df2, 't2')
return sqlctx
def test_pyspark_to_sparksql(ctx, people):
sdf = odo(data, ctx, dshape=discover(df))
assert isinstance(sdf, (SparkDataFrame, SchemaRDD))
assert (list(map(set, odo(people, list))) ==
list(map(set, odo(sdf, list))))
def test_pyspark_to_sparksql_raises_on_tuple_dshape(ctx, people):
with pytest.raises(TypeError):
odo(data, ctx)
def test_dataframe_to_sparksql(ctx):
sdf = odo(df, ctx)
assert isinstance(sdf, (SparkDataFrame, SchemaRDD))
assert odo(sdf, list) == odo(df, list)
def test_sparksql_to_frame(ctx):
result = odo(ctx.table('t'), pd.DataFrame)
np.testing.assert_array_equal(result.sort_index(axis=1).values,
df.sort_index(axis=1).values)
def test_reduction_to_scalar(ctx):
result = odo(ctx.sql('select sum(amount) from t'), float)
assert isinstance(result, float)
assert result == sum(map(toolz.second, data))
def test_discover_context(ctx):
result = discover(ctx)
assert result is not None
def test_schema_to_dshape():
assert schema_to_dshape(IntegerType()) == datashape.int32
assert schema_to_dshape(
ArrayType(IntegerType(), False)) == dshape("var * int32")
assert schema_to_dshape(
ArrayType(IntegerType(), True)) == dshape("var * ?int32")
assert schema_to_dshape(StructType([
StructField('name', StringType(), False),
StructField('amount', IntegerType(), True)])) \
== dshape("{name: string, amount: ?int32}")
def test_dshape_to_schema():
assert dshape_to_schema('int32') == IntegerType()
assert dshape_to_schema('5 * int32') == ArrayType(IntegerType(), False)
assert dshape_to_schema('5 * ?int32') == ArrayType(IntegerType(), True)
assert dshape_to_schema('{name: string, amount: int32}') == \
StructType([StructField('name', StringType(), False),
StructField('amount', IntegerType(), False)])
assert dshape_to_schema('10 * {name: string, amount: ?int32}') == \
ArrayType(StructType(
[StructField('name', StringType(), False),
StructField('amount', IntegerType(), True)]),
False)
def test_append_spark_df_to_json_lines(ctx):
out = os.linesep.join(map(json.dumps, df.to_dict('records')))
sdf = ctx.table('t')
expected = pd.concat([df, df]).sort('amount').reset_index(drop=True).sort_index(axis=1)
with tmpfile('.json') as fn:
with open(fn, mode='w') as f:
f.write(out + os.linesep)
uri = 'jsonlines://%s' % fn
odo(sdf, uri)
result = odo(uri, pd.DataFrame).sort('amount').reset_index(drop=True).sort_index(axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(raises=py4j.protocol.Py4JJavaError,
reason='bug in sparksql')
def test_append(ctx):
"""Add support for odo(SparkDataFrame, SparkDataFrame) when this is fixed.
"""
a = ctx.table('t2')
a.insertInto('t2')
result = odo(odo(a, pd.DataFrame), set)
expected = odo(pd.concat([odo(a, pd.DataFrame)]) * 2, set)
assert result == expected
def test_load_from_jsonlines(ctx):
with tmpfile('.json') as fn:
js = odo(df, 'jsonlines://%s' % fn)
result = odo(js, ctx, name='r')
assert (list(map(set, odo(result, list))) ==
list(map(set, odo(df, list))))
@contextmanager
def jslines(n=3):
d = tempfile.mkdtemp()
files = []
dfc = df.copy()
for i in range(n):
_, fn = tempfile.mkstemp(suffix='.json', dir=d)
dfc['id'] += i
odo(dfc, 'jsonlines://%s' % fn)
files.append(fn)
yield d
with ignoring(OSError):
shutil.rmtree(d)
def test_load_from_dir_of_jsonlines(ctx):
dfs = []
dfc = df.copy()
for i in range(3):
dfc['id'] += i
dfs.append(dfc.copy())
expected = pd.concat(dfs, axis=0, ignore_index=True)
with jslines() as d:
result = odo(Directory(JSONLines)(d), ctx)
assert (set(map(frozenset, odo(result, list))) ==
set(map(frozenset, odo(expected, list))))
| bsd-3-clause |
aemerick/galaxy_analysis | method_paper_plots/mass_plot.py | 1 | 7752 | from galaxy_analysis.plot.plot_styles import *
from galaxy_analysis.analysis.compute_time_average import compute_time_average
from galaxy_analysis.utilities import utilities
import deepdish as dd
import numpy as np
import matplotlib.pyplot as plt
import glob
import sys
rc('font',size=22)
#data_list, times = utilities.select_data_by_time(dir = work_dir,
# tmin=0.0,tmax=650.0)
#M_HI = np.ones(np.size(data_list))
#M_star = np.ones(np.size(data_list))
#M_total = np.ones(np.size(data_list))
#M_H2 = np.ones(np.size(data_list))
#for i,k in enumerate(data_list):
# M_HI[i] = dd.io.load(k, '/meta_data/M_HI')
# M_star[i] = dd.io.load(k, '/meta_data/M_star')
# M_total[i] = dd.io.load(k, '/meta_data/M_H_total') + dd.io.load(k,'/meta_data/M_He_total')
# M_H2[i] = dd.io.load(k, '/meta_data/M_H2I')
#
#
def plot_mass_resolution(work_dir = './', output_dir = None, comparison = None, new_color = False, colors = None):
if output_dir is None:
output_dir = work_dir
if comparison is None:
labels = {'3pcH2' : '3.6 pc' , '6pcH2' : '7.2 pc', 'Fiducial' : 'Fiducial'}
lstyle = {'3pcH2' : '--', '6pcH2' : '-.', 'Fiducial' : '-'}
dirs = {'3pcH2' : '../3pc_H2/' , '6pcH2' : '../6pc_H2/', 'Fiducial' : work_dir}
comparison = {}
for k in labels.keys():
comparison[k] = (dirs[k],labels[k],lstyle[k])
else:
dirs = {}
labels = {}
lstyle = {}
for k in comparison.keys():
dirs[k] = work_dir + comparison[k][0]
labels[k] = comparison[k][1]
lstyle[k] = comparison[k][2]
# labels = {'3pc_hsn' : '3.6 pc - SNx2', '3pc' : '3.6 pc', 'final_sndriving' : 'Fiducial', '6pc_hsn' : '7.2 pc'}
# lstyle = {'3pc_hsn' : '--', '3pc' : ':', 'final_sndriving' : '-', '6pc_hsn' : '-.'}
all_data = {}
for k in comparison.keys():
all_data[k] = {}
# if k == 'final_sndriving':
# all_data[k]['times'] = times
# all_data[k]['M_HI'] = M_HI
# all_data[k]['M_star'] = M_star
# all_data[k]['M_total'] = M_total
# all_data[k]['M_H2I'] = M_H2
#
if True:
dl, t = utilities.select_data_by_time(dir = dirs[k], tmin = 0.0, tmax=1000.0)
all_data[k]['times'] = t
all_data[k]['M_HI'] = np.zeros(np.size(dl))
all_data[k]['M_star'] = np.zeros(np.size(dl))
all_data[k]['M_total'] = np.zeros(np.size(dl))
all_data[k]['M_H2I'] = np.zeros(np.size(dl))
for i,d in enumerate(dl):
all_data[k]['M_HI'][i] = dd.io.load(d, '/meta_data/M_HI')
all_data[k]['M_star'][i] = dd.io.load(d, '/meta_data/M_star')
all_data[k]['M_total'][i] = dd.io.load(d, '/meta_data/M_H_total') + dd.io.load(d,'/meta_data/M_He_total')
all_data[k]['M_H2I'][i] = dd.io.load(d, '/meta_data/M_H2I')
    # done collecting data from all data sets
fig, ax = plt.subplots()
fig.set_size_inches(8,8)
if colors is None:
colors = {}
i = 0
for k in comparison.keys():
if not (k in colors.keys()):
colors[k] = 'C%1i'%(i)
i = i + 1
lstyle['M_total'] = '-'
lstyle['M_HI'] = '--'
lstyle['M_H2I'] = '-.'
lstyle['M_star'] = ':'
for k in comparison.keys():
for field,color in [('M_total','black'), ('M_HI','C0'), ('M_H2I','C1'), ('M_star','C3')]:
# for field, ls in [('M_total','-'), ('M_HI', '--'), ('M_H2I', ':')]: # , ('M_star' , '-.')]:
if field == 'M_total':
label = labels[k]
else:
label = None
# print k, field, np.size(all_data[k]['times']), np.size(all_data[k][field])
if new_color:
ax.plot(all_data[k]['times'] - all_data[k]['times'][0], all_data[k][field],
ls = lstyle[field],
lw = line_width, color = colors[k])
else:
plot_histogram(ax, all_data[k]['times'] - all_data[k]['times'][0], all_data[k][field], ls = lstyle[k], lw = line_width, color = color)
if new_color:
ax.plot((-1,-1), (-2,-2), ls = '-', lw = 3, color = 'black', label = r'M$_{\rm total}$')
ax.plot((-1,-1), (-2,-2), ls = '--', lw = 3, color = 'black', label = r'M$_{\rm HI}$')
ax.plot((-1,-1), (-2,-2), ls = '-.', lw = 3, color = 'black', label = r'M$_{\rm H_2}$')
ax.plot((-1,-1), (-2,-2), ls = ':', lw = 3, color = 'black', label = r'M$_{\rm *}$')
else:
ax.plot((-1,-1), (-2,-2), ls = '-', lw = 3, color = 'black', label = r'M$_{\rm total}$')
ax.plot((-1,-1), (-2,-2), ls = '-', lw = 3, color = 'C0', label = r'M$_{\rm HI}$')
ax.plot((-1,-1), (-2,-2), ls = '-', lw = 3, color = 'C1', label = r'M$_{\rm H_2}$')
ax.plot((-1,-1), (-2,-2), ls = '-', lw = 3, color = 'C3', label = r'M$_{\rm *}$')
ax.set_xlabel(r'Time (Myr)')
ax.set_ylabel(r'Mass in Disk (M$_{\odot}$)')
ax.semilogy()
ax.set_xlim(0.0, 500.0)
ax.legend(loc='lower right')
# ax.set_ylim(6E4, 3E6)
plt.tight_layout()
plt.minorticks_on()
    fig.savefig(output_dir + 'mass_evolution_resolution.png')
plt.close()
return
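# Illustrative usage sketch (an editorial addition, not part of the original
# analysis scripts): the `comparison` argument of plot_mass_resolution maps a
# run key to a (directory relative to work_dir, legend label, linestyle)
# tuple. The keys, paths and labels below are hypothetical placeholders.
def _example_resolution_comparison(work_dir='./'):
    comparison = {'3pcH2'    : ('../3pc_H2/', '3.6 pc', '--'),
                  'Fiducial' : ('',           'Fiducial', '-')}
    plot_mass_resolution(work_dir=work_dir, comparison=comparison)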
def plot_mass_evolution(work_dir, t_f = None, image_num = 0, outdir = './',
TMAX = 500.0):
data_list, times = utilities.select_data_by_time(dir = work_dir,
tmin=0.0,tmax=650.0)
M_HI = np.ones(np.size(data_list))
M_star = np.ones(np.size(data_list))
M_total = np.ones(np.size(data_list))
M_H2 = np.ones(np.size(data_list))
for i,k in enumerate(data_list):
M_HI[i] = dd.io.load(k, '/meta_data/M_HI')
M_star[i] = dd.io.load(k, '/meta_data/M_star')
M_total[i] = dd.io.load(k, '/meta_data/M_H_total') + dd.io.load(k,'/meta_data/M_He_total')
M_H2[i] = dd.io.load(k, '/meta_data/M_H2I')
selection = (times == times) # all vals
plot_times = times
if not (t_f is None):
selection = times <= t_f
plot_times = times[selection]
plot_times = plot_times - plot_times[0]
fig, ax = plt.subplots()
fig.set_size_inches(8,8)
plot_histogram(ax, plot_times, M_total[selection], lw = line_width, color = 'black', label = r'M$_{\rm total}$')
plot_histogram(ax, plot_times, M_HI[selection], lw = line_width, color = 'C0', label = r'M$_{\rm HI}$')
plot_histogram(ax, plot_times, M_H2[selection], lw = line_width, color = 'C1', label = r'M$_{\rm H_2 }$')
plot_histogram(ax, plot_times, M_star[selection], lw = line_width, color = 'C3', label = r'M$_*$')
ax.set_xlabel(r'Time (Myr)')
ax.set_ylabel(r'Mass in Disk (M$_{\odot}$)')
ax.semilogy()
ax.set_xlim(np.min(times-times[0]), np.min([TMAX, np.max(times - times[0])]) )
ax.legend(loc='lower right')
plt.tight_layout()
plt.minorticks_on()
if t_f is None:
fig.savefig(outdir + 'mass_evolution.png')
else:
fig.savefig('./mass_evolution_movie/mass_evolution_%0004i.png'%(image_num))
plt.close()
return
if __name__ == "__main__":
if len(sys.argv) > 1:
wdir = ''
work_dir = sys.argv[1]
else:
wdir = '/mnt/ceph/users/emerick/enzo_runs/pleiades/starIC/run11_30km/'
work_dir = wdir + 'final_sndriving/'
plot_mass_evolution(work_dir, TMAX = 1.0E3)
plot_mass_resolution(work_dir)
if False:
for i in np.arange(np.size(times)):
print(i)
plot_mass_evolution(t_f = times[i], image_num = i)
| mit |
cpcloud/arrow | python/pyarrow/tests/test_convert_pandas.py | 1 | 75527 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import decimal
import json
import multiprocessing as mp
from collections import OrderedDict
from datetime import date, datetime, time, timedelta
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.util.testing as tm
import pytest
import pyarrow as pa
import pyarrow.types as patypes
from pyarrow.compat import PY2
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
def _alltypes_example(size=100):
return pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
        'int8': np.arange(size, dtype=np.int8),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
        # TODO(wesm): Pandas only supports ns resolution, Arrow supports s, ms,
        # us, ns
'datetime': np.arange("2016-01-01T00:00:00.001", size,
dtype='datetime64[ms]'),
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
def _check_pandas_roundtrip(df, expected=None, use_threads=False,
expected_schema=None,
check_dtype=True, schema=None,
preserve_index=False,
as_batch=False):
klass = pa.RecordBatch if as_batch else pa.Table
table = klass.from_pandas(df, schema=schema,
preserve_index=preserve_index,
nthreads=2 if use_threads else 1)
result = table.to_pandas(use_threads=use_threads)
if expected_schema:
        # all occurrences of _check_pandas_roundtrip pass expected_schema
        # without the pandas-generated key-value metadata, so we need to
        # add it before checking schema equality
expected_schema = expected_schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
if expected is None:
expected = df
tm.assert_frame_equal(result, expected, check_dtype=check_dtype,
check_index_type=('equiv' if preserve_index
else False))
def _check_series_roundtrip(s, type_=None, expected_pa_type=None):
arr = pa.array(s, from_pandas=True, type=type_)
if type_ is not None and expected_pa_type is None:
expected_pa_type = type_
if expected_pa_type is not None:
assert arr.type == expected_pa_type
result = pd.Series(arr.to_pandas(), name=s.name)
if patypes.is_timestamp(arr.type) and arr.type.tz is not None:
result = (result.dt.tz_localize('utc')
.dt.tz_convert(arr.type.tz))
tm.assert_series_equal(s, result)
def _check_array_roundtrip(values, expected=None, mask=None,
type=None):
arr = pa.array(values, from_pandas=True, mask=mask, type=type)
result = arr.to_pandas()
values_nulls = pd.isnull(values)
if mask is None:
assert arr.null_count == values_nulls.sum()
else:
assert arr.null_count == (mask | values_nulls).sum()
if mask is None:
tm.assert_series_equal(pd.Series(result), pd.Series(values),
check_names=False)
else:
expected = pd.Series(np.ma.masked_array(values, mask=mask))
tm.assert_series_equal(pd.Series(result), expected,
check_names=False)
def _check_array_from_pandas_roundtrip(np_array, type=None):
arr = pa.array(np_array, from_pandas=True, type=type)
result = arr.to_pandas()
npt.assert_array_equal(result, np_array)
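# Illustrative sketch (an editorial addition, not part of the upstream test
# suite): the helpers above all wrap the same pandas <-> Arrow round-trip
# pattern. A minimal, self-contained version of that pattern, using a
# hypothetical DataFrame, looks like this:
def _roundtrip_pattern_sketch():
    df = pd.DataFrame({'ints': [1, 2, 3], 'strs': ['a', 'b', 'c']})
    table = pa.Table.from_pandas(df, preserve_index=False)
    result = table.to_pandas()
    tm.assert_frame_equal(result, df)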
class TestConvertMetadata(object):
"""
Conversion tests for Pandas metadata & indices.
"""
def test_non_string_columns(self):
df = pd.DataFrame({0: [1, 2, 3]})
table = pa.Table.from_pandas(df)
assert table.column(0).name == '0'
def test_from_pandas_with_columns(self):
df = pd.DataFrame({0: [1, 2, 3], 1: [1, 3, 3], 2: [2, 4, 5]})
table = pa.Table.from_pandas(df, columns=[0, 1])
expected = pa.Table.from_pandas(df[[0, 1]])
assert expected.equals(table)
record_batch_table = pa.RecordBatch.from_pandas(df, columns=[0, 1])
record_batch_expected = pa.RecordBatch.from_pandas(df[[0, 1]])
assert record_batch_expected.equals(record_batch_table)
def test_column_index_names_are_preserved(self):
df = pd.DataFrame({'data': [1, 2, 3]})
df.columns.names = ['a']
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns(self):
columns = pd.MultiIndex.from_arrays([
['one', 'two'], ['X', 'Y']
])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_with_dtypes(self):
columns = pd.MultiIndex.from_arrays(
[
['one', 'two'],
pd.DatetimeIndex(['2017-08-01', '2017-08-02']),
],
names=['level_1', 'level_2'],
)
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_unicode(self):
columns = pd.MultiIndex.from_arrays([[u'あ', u'い'], ['X', 'Y']])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_integer_index_column(self):
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')])
_check_pandas_roundtrip(df, preserve_index=True)
def test_index_metadata_field_name(self):
# test None case, and strangely named non-index columns
df = pd.DataFrame(
[(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)],
index=pd.MultiIndex.from_arrays(
[['c', 'b', 'a'], [3, 2, 1]],
names=[None, 'foo']
),
columns=['a', None, '__index_level_0__'],
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
col1, col2, col3, idx0, foo = js['columns']
assert col1['name'] == 'a'
assert col1['name'] == col1['field_name']
assert col2['name'] is None
assert col2['field_name'] == 'None'
assert col3['name'] == '__index_level_0__'
assert col3['name'] == col3['field_name']
idx0_name, foo_name = js['index_columns']
assert idx0_name == '__index_level_0__'
assert idx0['field_name'] == idx0_name
assert idx0['name'] is None
assert foo_name == 'foo'
assert foo['field_name'] == foo_name
assert foo['name'] == foo_name
def test_categorical_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), dtype='category')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'categorical'
assert column_indexes['numpy_type'] == 'int8'
md = column_indexes['metadata']
assert md['num_categories'] == 3
assert md['ordered'] is False
def test_string_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), name='stringz')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] == 'stringz'
assert column_indexes['name'] == column_indexes['field_name']
assert column_indexes['pandas_type'] == ('bytes' if PY2 else 'unicode')
assert column_indexes['numpy_type'] == 'object'
md = column_indexes['metadata']
if not PY2:
assert len(md) == 1
assert md['encoding'] == 'UTF-8'
else:
assert md is None or 'encoding' not in md
def test_datetimetz_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'datetimetz'
assert column_indexes['numpy_type'] == 'datetime64[ns]'
md = column_indexes['metadata']
assert md['timezone'] == 'America/New_York'
def test_datetimetz_row_index(self):
df = pd.DataFrame({
'a': pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
})
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_categorical_row_index(self):
df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})
df['a'] = df.a.astype('category')
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_duplicate_column_names_does_not_crash(self):
df = pd.DataFrame([(1, 'a'), (2, 'b')], columns=list('aa'))
with pytest.raises(ValueError):
pa.Table.from_pandas(df)
def test_dictionary_indices_boundscheck(self):
# ARROW-1658. No validation of indices leads to segfaults in pandas
indices = [[0, 1], [0, -1]]
for inds in indices:
arr = pa.DictionaryArray.from_arrays(inds, ['a'], safe=False)
batch = pa.RecordBatch.from_arrays([arr], ['foo'])
table = pa.Table.from_batches([batch, batch, batch])
with pytest.raises(pa.ArrowInvalid):
arr.to_pandas()
with pytest.raises(pa.ArrowInvalid):
table.to_pandas()
def test_unicode_with_unicode_column_and_index(self):
df = pd.DataFrame({u'あ': [u'い']}, index=[u'う'])
_check_pandas_roundtrip(df, preserve_index=True)
def test_mixed_unicode_column_names(self):
df = pd.DataFrame({u'あ': [u'い'], b'a': 1}, index=[u'う'])
# TODO(phillipc): Should this raise?
with pytest.raises(AssertionError):
_check_pandas_roundtrip(df, preserve_index=True)
def test_binary_column_name(self):
column_data = [u'い']
key = u'あ'.encode('utf8')
data = {key: column_data}
df = pd.DataFrame(data)
        # we can't use _check_pandas_roundtrip here because our metadata
# is always decoded as utf8: even if binary goes in, utf8 comes out
t = pa.Table.from_pandas(df, preserve_index=True)
df2 = t.to_pandas()
assert df.values[0] == df2.values[0]
assert df.index.values[0] == df2.index.values[0]
assert df.columns[0] == key
def test_multiindex_duplicate_values(self):
num_rows = 3
numbers = list(range(num_rows))
index = pd.MultiIndex.from_arrays(
[['foo', 'foo', 'bar'], numbers],
names=['foobar', 'some_numbers'],
)
df = pd.DataFrame({'numbers': numbers}, index=index)
table = pa.Table.from_pandas(df)
result_df = table.to_pandas()
tm.assert_frame_equal(result_df, df)
def test_metadata_with_mixed_types(self):
df = pd.DataFrame({'data': [b'some_bytes', u'some_unicode']})
table = pa.Table.from_pandas(df)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'bytes'
assert data_column['numpy_type'] == 'object'
def test_list_metadata(self):
df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]})
schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))])
table = pa.Table.from_pandas(df, schema=schema)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'list[int64]'
assert data_column['numpy_type'] == 'object'
def test_decimal_metadata(self):
expected = pd.DataFrame({
'decimals': [
decimal.Decimal('394092382910493.12341234678'),
-decimal.Decimal('314292388910493.12343437128'),
]
})
table = pa.Table.from_pandas(expected)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'decimal'
assert data_column['numpy_type'] == 'object'
assert data_column['metadata'] == {'precision': 26, 'scale': 11}
def test_table_column_subset_metadata(self):
# ARROW-1883
df = pd.DataFrame({
'a': [1, 2, 3],
'b': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']])
# non-default index
for index in [
pd.Index(['a', 'b', 'c'], name='index'),
pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')]:
df = pd.DataFrame({'a': [1, 2, 3],
'b': [.1, .2, .3]}, index=index)
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']].reset_index(drop=True))
def test_empty_list_metadata(self):
# Create table with array of empty lists, forced to have type
# list(string) in pyarrow
c1 = [["test"], ["a", "b"], None]
c2 = [[], [], []]
arrays = OrderedDict([
('c1', pa.array(c1, type=pa.list_(pa.string()))),
('c2', pa.array(c2, type=pa.list_(pa.string()))),
])
rb = pa.RecordBatch.from_arrays(
list(arrays.values()),
list(arrays.keys())
)
tbl = pa.Table.from_batches([rb])
# First roundtrip changes schema, because pandas cannot preserve the
# type of empty lists
df = tbl.to_pandas()
tbl2 = pa.Table.from_pandas(df, preserve_index=True)
md2 = json.loads(tbl2.schema.metadata[b'pandas'].decode('utf8'))
# Second roundtrip
df2 = tbl2.to_pandas()
expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)]))
tm.assert_frame_equal(df2, expected)
assert md2['columns'] == [
{
'name': 'c1',
'field_name': 'c1',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[unicode]',
},
{
'name': 'c2',
'field_name': 'c2',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[empty]',
},
{
'name': None,
'field_name': '__index_level_0__',
'metadata': None,
'numpy_type': 'int64',
'pandas_type': 'int64',
}
]
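# Helper sketch (an editorial addition, not part of the upstream test suite):
# several tests in TestConvertMetadata above decode the schema's 'pandas'
# metadata blob by hand; the recurring pattern is simply
def _decode_pandas_metadata_sketch(table):
    raw = table.schema.metadata[b'pandas']
    return json.loads(raw.decode('utf8'))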
class TestConvertPrimitiveTypes(object):
"""
Conversion tests for primitive (e.g. numeric) types.
"""
def test_float_no_nulls(self):
data = {}
fields = []
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
num_values = 100
for numpy_dtype, arrow_dtype in dtypes:
values = np.random.randn(num_values)
data[numpy_dtype] = values.astype(numpy_dtype)
fields.append(pa.field(numpy_dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_float_nulls(self):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
names = ['f2', 'f4', 'f8']
expected_cols = []
arrays = []
fields = []
for name, arrow_dtype in dtypes:
values = np.random.randn(num_values).astype(name)
arr = pa.array(values, from_pandas=True, mask=null_mask)
arrays.append(arr)
fields.append(pa.field(name, arrow_dtype))
values[null_mask] = np.nan
expected_cols.append(values)
ex_frame = pd.DataFrame(dict(zip(names, expected_cols)),
columns=names)
table = pa.Table.from_arrays(arrays, names)
assert table.schema.equals(pa.schema(fields))
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_nulls_to_ints(self):
# ARROW-2135
df = pd.DataFrame({"a": [1.0, 2.0, pd.np.NaN]})
schema = pa.schema([pa.field("a", pa.int16(), nullable=True)])
table = pa.Table.from_pandas(df, schema=schema)
assert table[0].to_pylist() == [1, 2, None]
tm.assert_frame_equal(df, table.to_pandas())
def test_integer_no_nulls(self):
data = OrderedDict()
fields = []
numpy_dtypes = [
('i1', pa.int8()), ('i2', pa.int16()),
('i4', pa.int32()), ('i8', pa.int64()),
('u1', pa.uint8()), ('u2', pa.uint16()),
('u4', pa.uint32()), ('u8', pa.uint64()),
('longlong', pa.int64()), ('ulonglong', pa.uint64())
]
num_values = 100
for dtype, arrow_dtype in numpy_dtypes:
info = np.iinfo(dtype)
values = np.random.randint(max(info.min, np.iinfo(np.int_).min),
min(info.max, np.iinfo(np.int_).max),
size=num_values)
data[dtype] = values.astype(dtype)
fields.append(pa.field(dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_integer_types(self):
# Test all Numpy integer aliases
data = OrderedDict()
numpy_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',
'byte', 'ubyte', 'short', 'ushort', 'intc', 'uintc',
'int_', 'uint', 'longlong', 'ulonglong']
for dtype in numpy_dtypes:
data[dtype] = np.arange(12, dtype=dtype)
df = pd.DataFrame(data)
_check_pandas_roundtrip(df)
# Do the same with pa.array()
# (for some reason, it doesn't use the same code paths at all)
for np_arr in data.values():
arr = pa.array(np_arr)
assert arr.to_pylist() == np_arr.tolist()
def test_integer_with_nulls(self):
# pandas requires upcast to float dtype
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
arrays = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
arr = pa.array(values, mask=null_mask)
arrays.append(arr)
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
columns=int_dtypes)
table = pa.Table.from_arrays(arrays, int_dtypes)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_array_from_pandas_type_cast(self):
arr = np.arange(10, dtype='int64')
target_type = pa.int8()
result = pa.array(arr, type=target_type)
expected = pa.array(arr.astype('int8'))
assert result.equals(expected)
def test_boolean_no_nulls(self):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_boolean_nulls(self):
# pandas requires upcast to object dtype
num_values = 100
np.random.seed(0)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
arr = pa.array(values, mask=mask)
expected = values.astype(object)
expected[mask] = None
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
ex_frame = pd.DataFrame({'bools': expected})
table = pa.Table.from_arrays([arr], ['bools'])
assert table.schema.equals(schema)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_object_nulls(self):
arr = np.array([None, 1.5, np.float64(3.5)] * 5, dtype=object)
df = pd.DataFrame({'floats': arr})
expected = pd.DataFrame({'floats': pd.to_numeric(arr)})
field = pa.field('floats', pa.float64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_int_object_nulls(self):
arr = np.array([None, 1, np.int64(3)] * 5, dtype=object)
df = pd.DataFrame({'ints': arr})
expected = pd.DataFrame({'ints': pd.to_numeric(arr)})
field = pa.field('ints', pa.int64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_boolean_object_nulls(self):
arr = np.array([False, None, True] * 100, dtype=object)
df = pd.DataFrame({'bools': arr})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_nulls_cast_numeric(self):
arr = np.array([None], dtype=object)
def _check_type(t):
a2 = pa.array(arr, type=t)
assert a2.type == t
assert a2[0].as_py() is None
_check_type(pa.int32())
_check_type(pa.float64())
def test_half_floats_from_numpy(self):
arr = np.array([1.5, np.nan], dtype=np.float16)
a = pa.array(arr, type=pa.float16())
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert isinstance(y, np.float16)
assert np.isnan(y)
a = pa.array(arr, type=pa.float16(), from_pandas=True)
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert y is None
@pytest.mark.parametrize('dtype',
['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_array_integer_object_nulls_option(dtype):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 100, size=num_values, dtype=dtype)
array = pa.array(values, mask=null_mask)
if null_mask.any():
expected = values.astype('O')
expected[null_mask] = None
else:
expected = values
result = array.to_pandas(integer_object_nulls=True)
np.testing.assert_equal(result, expected)
@pytest.mark.parametrize('dtype',
['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_table_integer_object_nulls_option(dtype):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 100, size=num_values, dtype=dtype)
array = pa.array(values, mask=null_mask)
if null_mask.any():
expected = values.astype('O')
expected[null_mask] = None
else:
expected = values
expected = pd.DataFrame({dtype: expected})
table = pa.Table.from_arrays([array], [dtype])
result = table.to_pandas(integer_object_nulls=True)
tm.assert_frame_equal(result, expected)
class TestConvertDateTimeLikeTypes(object):
"""
Conversion tests for datetime- and timestamp-like types (date64, etc.).
"""
def test_timestamps_notimezone_no_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_notimezone_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_with_timezone(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123',
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
_check_series_roundtrip(df['datetime64'])
# drop-in a null and ns instead of ms
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
def test_python_datetime(self):
# ARROW-2106
date_array = [datetime.today() + timedelta(days=x) for x in range(10)]
df = pd.DataFrame({
'datetime': pd.Series(date_array, dtype=object)
})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({
'datetime': date_array
})
tm.assert_frame_equal(expected_df, result)
def test_python_datetime_subclass(self):
class MyDatetime(datetime):
# see https://github.com/pandas-dev/pandas/issues/21142
nanosecond = 0.0
date_array = [MyDatetime(2000, 1, 1, 1, 1, 1)]
df = pd.DataFrame({"datetime": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({"datetime": date_array})
# https://github.com/pandas-dev/pandas/issues/21142
expected_df["datetime"] = pd.to_datetime(expected_df["datetime"])
tm.assert_frame_equal(expected_df, result)
def test_python_date_subclass(self):
class MyDate(date):
pass
date_array = [MyDate(2000, 1, 1)]
df = pd.DataFrame({"date": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.Date32Array)
result = table.to_pandas()
expected_df = pd.DataFrame(
{"date": np.array(["2000-01-01"], dtype="datetime64[ns]")}
)
tm.assert_frame_equal(expected_df, result)
def test_datetime64_to_date32(self):
# ARROW-1718
arr = pa.array([date(2017, 10, 23), None])
c = pa.Column.from_array("d", arr)
s = c.to_pandas()
arr2 = pa.Array.from_pandas(s, type=pa.date32())
assert arr2.equals(arr.cast('date32'))
@pytest.mark.parametrize('mask', [
None,
np.ones(3),
np.array([True, False, False]),
])
def test_pandas_datetime_to_date64(self, mask):
s = pd.to_datetime([
'2018-05-10T00:00:00',
'2018-05-11T00:00:00',
'2018-05-12T00:00:00',
])
arr = pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
data = np.array([
date(2018, 5, 10),
date(2018, 5, 11),
date(2018, 5, 12)
])
expected = pa.array(data, mask=mask, type=pa.date64())
assert arr.equals(expected)
@pytest.mark.parametrize('mask', [
None,
np.ones(3),
np.array([True, False, False])
])
def test_pandas_datetime_to_date64_failures(self, mask):
s = pd.to_datetime([
'2018-05-10T10:24:01',
'2018-05-11T10:24:01',
'2018-05-12T10:24:01',
])
expected_msg = 'Timestamp value had non-zero intraday milliseconds'
with pytest.raises(pa.ArrowInvalid, match=expected_msg):
pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
def test_date_infer(self):
df = pd.DataFrame({
'date': [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]})
table = pa.Table.from_pandas(df, preserve_index=False)
field = pa.field('date', pa.date32())
# schema's metadata is generated by from_pandas conversion
expected_schema = pa.schema([field], metadata=table.schema.metadata)
assert table.schema.equals(expected_schema)
result = table.to_pandas()
expected = df.copy()
expected['date'] = pd.to_datetime(df['date'])
tm.assert_frame_equal(result, expected)
def test_date_mask(self):
arr = np.array([date(2017, 4, 3), date(2017, 4, 4)],
dtype='datetime64[D]')
mask = [True, False]
result = pa.array(arr, mask=np.array(mask))
expected = np.array([None, date(2017, 4, 4)], dtype='datetime64[D]')
expected = pa.array(expected, from_pandas=True)
assert expected.equals(result)
def test_date_objects_typed(self):
arr = np.array([
date(2017, 4, 3),
None,
date(2017, 4, 4),
date(2017, 4, 5)], dtype=object)
arr_i4 = np.array([17259, -1, 17260, 17261], dtype='int32')
arr_i8 = arr_i4.astype('int64') * 86400000
mask = np.array([False, True, False, False])
t32 = pa.date32()
t64 = pa.date64()
a32 = pa.array(arr, type=t32)
a64 = pa.array(arr, type=t64)
a32_expected = pa.array(arr_i4, mask=mask, type=t32)
a64_expected = pa.array(arr_i8, mask=mask, type=t64)
assert a32.equals(a32_expected)
assert a64.equals(a64_expected)
# Test converting back to pandas
colnames = ['date32', 'date64']
table = pa.Table.from_arrays([a32, a64], colnames)
table_pandas = table.to_pandas()
ex_values = (np.array(['2017-04-03', '2017-04-04', '2017-04-04',
'2017-04-05'],
dtype='datetime64[D]')
.astype('datetime64[ns]'))
ex_values[1] = pd.NaT.value
expected_pandas = pd.DataFrame({'date32': ex_values,
'date64': ex_values},
columns=colnames)
tm.assert_frame_equal(table_pandas, expected_pandas)
def test_dates_from_integers(self):
t1 = pa.date32()
t2 = pa.date64()
arr = np.array([17259, 17260, 17261], dtype='int32')
arr2 = arr.astype('int64') * 86400000
a1 = pa.array(arr, type=t1)
a2 = pa.array(arr2, type=t2)
expected = date(2017, 4, 3)
assert a1[0].as_py() == expected
assert a2[0].as_py() == expected
@pytest.mark.xfail(reason="not supported ATM",
raises=NotImplementedError)
def test_timedelta(self):
        # TODO(jreback): Pandas only supports ns resolution
        # Arrow supports ??? for resolution
df = pd.DataFrame({
'timedelta': np.arange(start=0, stop=3 * 86400000,
step=86400000,
dtype='timedelta64[ms]')
})
pa.Table.from_pandas(df)
def test_pytime_from_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356)]
# microseconds
t1 = pa.time64('us')
aobjs = np.array(pytimes + [None], dtype=object)
parr = pa.array(aobjs)
assert parr.type == t1
assert parr[0].as_py() == pytimes[0]
assert parr[1].as_py() == pytimes[1]
assert parr[2] is pa.NA
# DataFrame
df = pd.DataFrame({'times': aobjs})
batch = pa.RecordBatch.from_pandas(df)
assert batch[0].equals(parr)
# Test ndarray of int64 values
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
a1 = pa.array(arr, type=pa.time64('us'))
assert a1[0].as_py() == pytimes[0]
a2 = pa.array(arr * 1000, type=pa.time64('ns'))
assert a2[0].as_py() == pytimes[0]
a3 = pa.array((arr / 1000).astype('i4'),
type=pa.time32('ms'))
assert a3[0].as_py() == pytimes[0].replace(microsecond=1000)
a4 = pa.array((arr / 1000000).astype('i4'),
type=pa.time32('s'))
assert a4[0].as_py() == pytimes[0].replace(microsecond=0)
def test_arrow_time_to_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356),
time(0, 0, 0)]
expected = np.array(pytimes[:2] + [None])
expected_ms = np.array([x.replace(microsecond=1000)
for x in pytimes[:2]] +
[None])
expected_s = np.array([x.replace(microsecond=0)
for x in pytimes[:2]] +
[None])
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
null_mask = np.array([False, False, True], dtype=bool)
a1 = pa.array(arr, mask=null_mask, type=pa.time64('us'))
a2 = pa.array(arr * 1000, mask=null_mask,
type=pa.time64('ns'))
a3 = pa.array((arr / 1000).astype('i4'), mask=null_mask,
type=pa.time32('ms'))
a4 = pa.array((arr / 1000000).astype('i4'), mask=null_mask,
type=pa.time32('s'))
names = ['time64[us]', 'time64[ns]', 'time32[ms]', 'time32[s]']
batch = pa.RecordBatch.from_arrays([a1, a2, a3, a4], names)
arr = a1.to_pandas()
assert (arr == expected).all()
arr = a2.to_pandas()
assert (arr == expected).all()
arr = a3.to_pandas()
assert (arr == expected_ms).all()
arr = a4.to_pandas()
assert (arr == expected_s).all()
df = batch.to_pandas()
expected_df = pd.DataFrame({'time64[us]': expected,
'time64[ns]': expected,
'time32[ms]': expected_ms,
'time32[s]': expected_s},
columns=names)
tm.assert_frame_equal(df, expected_df)
def test_numpy_datetime64_columns(self):
datetime64_ns = np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
_check_array_from_pandas_roundtrip(datetime64_ns)
datetime64_us = np.array([
'2007-07-13T01:23:34.123456',
None,
'2006-01-13T12:34:56.432539',
'2010-08-13T05:46:57.437699'],
dtype='datetime64[us]')
_check_array_from_pandas_roundtrip(datetime64_us)
datetime64_ms = np.array([
'2007-07-13T01:23:34.123',
None,
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
_check_array_from_pandas_roundtrip(datetime64_ms)
datetime64_s = np.array([
'2007-07-13T01:23:34',
None,
'2006-01-13T12:34:56',
'2010-08-13T05:46:57'],
dtype='datetime64[s]')
_check_array_from_pandas_roundtrip(datetime64_s)
@pytest.mark.parametrize('dtype', [pa.date32(), pa.date64()])
def test_numpy_datetime64_day_unit(self, dtype):
datetime64_d = np.array([
'2007-07-13',
None,
'2006-01-15',
'2010-08-19'],
dtype='datetime64[D]')
_check_array_from_pandas_roundtrip(datetime64_d, type=dtype)
def test_array_from_pandas_date_with_mask(self):
m = np.array([True, False, True])
data = pd.Series([
date(1990, 1, 1),
date(1991, 1, 1),
date(1992, 1, 1)
])
result = pa.Array.from_pandas(data, mask=m)
expected = pd.Series([None, date(1991, 1, 1), None])
assert pa.Array.from_pandas(expected).equals(result)
def test_fixed_offset_timezone(self):
df = pd.DataFrame({
'a': [
pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.NaT
]
})
_check_pandas_roundtrip(df)
_check_serialize_components_roundtrip(df)
class TestConvertStringLikeTypes(object):
"""
Conversion tests for string and binary types.
"""
def test_unicode(self):
repeats = 1000
values = [u'foo', None, u'bar', u'mañana', np.nan]
df = pd.DataFrame({'strings': values * repeats})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_bytes_to_binary(self):
values = [u'qux', b'foo', None, bytearray(b'barz'), 'qux', np.nan]
df = pd.DataFrame({'strings': values})
table = pa.Table.from_pandas(df)
assert table[0].type == pa.binary()
values2 = [b'qux', b'foo', None, b'barz', b'qux', np.nan]
expected = pd.DataFrame({'strings': values2})
_check_pandas_roundtrip(df, expected)
@pytest.mark.large_memory
def test_bytes_exceed_2gb(self):
v1 = b'x' * 100000000
v2 = b'x' * 147483646
# ARROW-2227, hit exactly 2GB on the nose
df = pd.DataFrame({
'strings': [v1] * 20 + [v2] + ['x'] * 20
})
arr = pa.array(df['strings'])
assert isinstance(arr, pa.ChunkedArray)
assert arr.num_chunks == 2
arr = None
table = pa.Table.from_pandas(df)
assert table[0].data.num_chunks == 2
def test_fixed_size_bytes(self):
values = [b'foo', None, bytearray(b'bar'), None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
table = pa.Table.from_pandas(df, schema=schema)
assert table.schema[0].type == schema[0].type
assert table.schema[0].name == schema[0].name
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_fixed_size_bytes_does_not_accept_varying_lengths(self):
values = [b'foo', None, b'ba', None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
with pytest.raises(pa.ArrowInvalid):
pa.Table.from_pandas(df, schema=schema)
def test_variable_size_bytes(self):
s = pd.Series([b'123', b'', b'a', None])
_check_series_roundtrip(s, type_=pa.binary())
def test_binary_from_bytearray(self):
s = pd.Series([bytearray(b'123'), bytearray(b''), bytearray(b'a'),
None])
# Explicitly set type
_check_series_roundtrip(s, type_=pa.binary())
# Infer type from bytearrays
_check_series_roundtrip(s, expected_pa_type=pa.binary())
def test_table_empty_str(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result1 = table.to_pandas(strings_to_categorical=False)
expected1 = pd.DataFrame({'strings': values})
tm.assert_frame_equal(result1, expected1, check_dtype=True)
result2 = table.to_pandas(strings_to_categorical=True)
expected2 = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result2, expected2, check_dtype=True)
def test_selective_categoricals(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
expected_str = pd.DataFrame({'strings': values})
expected_cat = pd.DataFrame({'strings': pd.Categorical(values)})
result1 = table.to_pandas(categories=['strings'])
tm.assert_frame_equal(result1, expected_cat, check_dtype=True)
result2 = table.to_pandas(categories=[])
tm.assert_frame_equal(result2, expected_str, check_dtype=True)
result3 = table.to_pandas(categories=('strings',))
tm.assert_frame_equal(result3, expected_cat, check_dtype=True)
result4 = table.to_pandas(categories=tuple())
tm.assert_frame_equal(result4, expected_str, check_dtype=True)
def test_table_str_to_categorical_without_na(self):
values = ['a', 'a', 'b', 'b', 'c']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
def test_table_str_to_categorical_with_na(self):
values = [None, 'a', 'b', np.nan]
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
# Regression test for ARROW-2101
def test_array_of_bytes_to_strings(self):
converted = pa.array(np.array([b'x'], dtype=object), pa.string())
assert converted.type == pa.string()
# Make sure that if an ndarray of bytes is passed to the array
# constructor and the type is string, it will fail if those bytes
# cannot be converted to utf-8
def test_array_of_bytes_to_strings_bad_data(self):
with pytest.raises(
pa.lib.ArrowInvalid,
match=("'(utf8|utf-8)' codec can't decode byte 0x80 "
"in position 0: invalid start byte")):
pa.array(np.array([b'\x80\x81'], dtype=object), pa.string())
def test_numpy_string_array_to_fixed_size_binary(self):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
converted = pa.array(arr, type=pa.binary(3))
expected = pa.array(list(arr), type=pa.binary(3))
assert converted.equals(expected)
mask = np.array([True, False, True])
converted = pa.array(arr, type=pa.binary(3), mask=mask)
expected = pa.array([b'foo', None, b'baz'], type=pa.binary(3))
assert converted.equals(expected)
with pytest.raises(pa.lib.ArrowInvalid,
                           match=r'Got bytestring of length 3 \(expected 4\)'):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
pa.array(arr, type=pa.binary(4))
with pytest.raises(pa.lib.ArrowInvalid,
                           match=r'Got bytestring of length 12 \(expected 3\)'):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|U3')
pa.array(arr, type=pa.binary(3))
class TestConvertDecimalTypes(object):
"""
Conversion test for decimal types.
"""
decimal32 = [
decimal.Decimal('-1234.123'),
decimal.Decimal('1234.439')
]
decimal64 = [
decimal.Decimal('-129934.123331'),
decimal.Decimal('129534.123731')
]
decimal128 = [
decimal.Decimal('394092382910493.12341234678'),
decimal.Decimal('-314292388910493.12343437128')
]
@pytest.mark.parametrize(('values', 'expected_type'), [
pytest.param(decimal32, pa.decimal128(7, 3), id='decimal32'),
pytest.param(decimal64, pa.decimal128(12, 6), id='decimal64'),
pytest.param(decimal128, pa.decimal128(26, 11), id='decimal128')
])
def test_decimal_from_pandas(self, values, expected_type):
expected = pd.DataFrame({'decimals': values})
table = pa.Table.from_pandas(expected, preserve_index=False)
field = pa.field('decimals', expected_type)
# schema's metadata is generated by from_pandas conversion
expected_schema = pa.schema([field], metadata=table.schema.metadata)
assert table.schema.equals(expected_schema)
@pytest.mark.parametrize('values', [
pytest.param(decimal32, id='decimal32'),
pytest.param(decimal64, id='decimal64'),
pytest.param(decimal128, id='decimal128')
])
def test_decimal_to_pandas(self, values):
expected = pd.DataFrame({'decimals': values})
converted = pa.Table.from_pandas(expected)
df = converted.to_pandas()
tm.assert_frame_equal(df, expected)
def test_decimal_fails_with_truncation(self):
data1 = [decimal.Decimal('1.234')]
type1 = pa.decimal128(10, 2)
with pytest.raises(pa.ArrowInvalid):
pa.array(data1, type=type1)
data2 = [decimal.Decimal('1.2345')]
type2 = pa.decimal128(10, 3)
with pytest.raises(pa.ArrowInvalid):
pa.array(data2, type=type2)
def test_decimal_with_different_precisions(self):
data = [
decimal.Decimal('0.01'),
decimal.Decimal('0.001'),
]
series = pd.Series(data)
array = pa.array(series)
assert array.to_pylist() == data
assert array.type == pa.decimal128(3, 3)
array = pa.array(data, type=pa.decimal128(12, 5))
expected = [decimal.Decimal('0.01000'), decimal.Decimal('0.00100')]
assert array.to_pylist() == expected
def test_decimal_with_None_explicit_type(self):
series = pd.Series([decimal.Decimal('3.14'), None])
_check_series_roundtrip(series, type_=pa.decimal128(12, 5))
# Test that having all None values still produces decimal array
series = pd.Series([None] * 2)
_check_series_roundtrip(series, type_=pa.decimal128(12, 5))
def test_decimal_with_None_infer_type(self):
series = pd.Series([decimal.Decimal('3.14'), None])
_check_series_roundtrip(series, expected_pa_type=pa.decimal128(3, 2))
class TestListTypes(object):
"""
Conversion tests for list<> types.
"""
def test_column_of_arrays(self):
df, schema = dataframe_with_arrays()
_check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
# schema's metadata is generated by from_pandas conversion
expected_schema = schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
for column in df.columns:
field = schema.field_by_name(column)
_check_array_roundtrip(df[column], type=field.type)
def test_column_of_arrays_to_py(self):
# Test regression in ARROW-1199 not caught in above test
dtype = 'i1'
arr = np.array([
np.arange(10, dtype=dtype),
np.arange(5, dtype=dtype),
None,
np.arange(1, dtype=dtype)
])
type_ = pa.list_(pa.int8())
parr = pa.array(arr, type=type_)
assert parr[0].as_py() == list(range(10))
assert parr[1].as_py() == list(range(5))
assert parr[2].as_py() is None
assert parr[3].as_py() == [0]
def test_column_of_lists(self):
df, schema = dataframe_with_lists()
_check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
# schema's metadata is generated by from_pandas conversion
expected_schema = schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
for column in df.columns:
field = schema.field_by_name(column)
_check_array_roundtrip(df[column], type=field.type)
def test_column_of_lists_first_empty(self):
# ARROW-2124
num_lists = [[], [2, 3, 4], [3, 6, 7, 8], [], [2]]
series = pd.Series([np.array(s, dtype=float) for s in num_lists])
arr = pa.array(series)
result = pd.Series(arr.to_pandas())
tm.assert_series_equal(result, series)
def test_column_of_lists_chunked(self):
# ARROW-1357
df = pd.DataFrame({
'lists': np.array([
[1, 2],
None,
[2, 3],
[4, 5],
[6, 7],
[8, 9]
], dtype=object)
})
schema = pa.schema([
pa.field('lists', pa.list_(pa.int64()))
])
t1 = pa.Table.from_pandas(df[:2], schema=schema)
t2 = pa.Table.from_pandas(df[2:], schema=schema)
table = pa.concat_tables([t1, t2])
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_column_of_lists_chunked2(self):
data1 = [[0, 1], [2, 3], [4, 5], [6, 7], [10, 11],
[12, 13], [14, 15], [16, 17]]
data2 = [[8, 9], [18, 19]]
a1 = pa.array(data1)
a2 = pa.array(data2)
t1 = pa.Table.from_arrays([a1], names=['a'])
t2 = pa.Table.from_arrays([a2], names=['a'])
concatenated = pa.concat_tables([t1, t2])
result = concatenated.to_pandas()
expected = pd.DataFrame({'a': data1 + data2})
tm.assert_frame_equal(result, expected)
def test_column_of_lists_strided(self):
df, schema = dataframe_with_lists()
df = pd.concat([df] * 6, ignore_index=True)
arr = df['int64'].values[::3]
assert arr.strides[0] != 8
_check_array_roundtrip(arr)
def test_nested_lists_all_none(self):
data = np.array([[None, None], None], dtype=object)
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.null())
data2 = np.array([None, None, [None, None],
np.array([None, None], dtype=object)],
dtype=object)
arr = pa.array(data2)
expected = pa.array([None, None, [None, None], [None, None]])
assert arr.equals(expected)
def test_nested_lists_all_empty(self):
# ARROW-2128
data = pd.Series([[], [], []])
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.null())
def test_nested_list_first_empty(self):
# ARROW-2711
data = pd.Series([[], [u"a"]])
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.string())
def test_nested_smaller_ints(self):
# ARROW-1345, ARROW-2008, there were some type inference bugs happening
# before
data = pd.Series([np.array([1, 2, 3], dtype='i1'), None])
result = pa.array(data)
result2 = pa.array(data.values)
expected = pa.array([[1, 2, 3], None], type=pa.list_(pa.int8()))
assert result.equals(expected)
assert result2.equals(expected)
data3 = pd.Series([np.array([1, 2, 3], dtype='f4'), None])
result3 = pa.array(data3)
expected3 = pa.array([[1, 2, 3], None], type=pa.list_(pa.float32()))
assert result3.equals(expected3)
def test_infer_lists(self):
data = OrderedDict([
('nan_ints', [[None, 1], [2, 3]]),
('ints', [[0, 1], [2, 3]]),
('strs', [[None, u'b'], [u'c', u'd']]),
('nested_strs', [[[None, u'b'], [u'c', u'd']], None])
])
df = pd.DataFrame(data)
expected_schema = pa.schema([
pa.field('nan_ints', pa.list_(pa.int64())),
pa.field('ints', pa.list_(pa.int64())),
pa.field('strs', pa.list_(pa.string())),
pa.field('nested_strs', pa.list_(pa.list_(pa.string())))
])
_check_pandas_roundtrip(df, expected_schema=expected_schema)
def test_infer_numpy_array(self):
data = OrderedDict([
('ints', [
np.array([0, 1], dtype=np.int64),
np.array([2, 3], dtype=np.int64)
])
])
df = pd.DataFrame(data)
expected_schema = pa.schema([
pa.field('ints', pa.list_(pa.int64()))
])
_check_pandas_roundtrip(df, expected_schema=expected_schema)
@pytest.mark.parametrize('t,data,expected', [
(
pa.int64,
[[1, 2], [3], None],
[None, [3], None]
),
(
pa.string,
[[u'aaa', u'bb'], [u'c'], None],
[None, [u'c'], None]
),
(
pa.null,
[[None, None], [None], None],
[None, [None], None]
)
])
def test_array_from_pandas_typed_array_with_mask(self, t, data, expected):
m = np.array([True, False, True])
s = pd.Series(data)
result = pa.Array.from_pandas(s, mask=m, type=pa.list_(t()))
assert pa.Array.from_pandas(expected,
type=pa.list_(t())).equals(result)
def test_empty_list_roundtrip(self):
empty_list_array = np.empty((3,), dtype=object)
empty_list_array.fill([])
df = pd.DataFrame({'a': np.array(['1', '2', '3']),
'b': empty_list_array})
tbl = pa.Table.from_pandas(df)
result = tbl.to_pandas()
tm.assert_frame_equal(result, df)
def test_array_from_nested_arrays(self):
df, schema = dataframe_with_arrays()
for field in schema:
arr = df[field.name].values
expected = pa.array(list(arr), type=field.type)
result = pa.array(arr)
assert result.type == field.type # == list<scalar>
assert result.equals(expected)
class TestConvertStructTypes(object):
"""
Conversion tests for struct types.
"""
def test_to_pandas(self):
ints = pa.array([None, 2, 3], type=pa.int64())
strs = pa.array([u'a', None, u'c'], type=pa.string())
bools = pa.array([True, False, None], type=pa.bool_())
arr = pa.StructArray.from_arrays(
[ints, strs, bools],
['ints', 'strs', 'bools'])
expected = pd.Series([
{'ints': None, 'strs': u'a', 'bools': True},
{'ints': 2, 'strs': None, 'bools': False},
{'ints': 3, 'strs': u'c', 'bools': None},
])
series = pd.Series(arr.to_pandas())
tm.assert_series_equal(series, expected)
def test_from_numpy(self):
dt = np.dtype([('x', np.int32),
(('y_title', 'y'), np.bool_)])
ty = pa.struct([pa.field('x', pa.int32()),
pa.field('y', pa.bool_())])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([(42, True), (43, False)], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{'x': 42, 'y': True},
{'x': 43, 'y': False}]
# With mask
arr = pa.array(data, mask=np.bool_([False, True]), type=ty)
assert arr.to_pylist() == [{'x': 42, 'y': True}, None]
# Trivial struct type
dt = np.dtype([])
ty = pa.struct([])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([(), ()], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{}, {}]
def test_from_numpy_nested(self):
dt = np.dtype([('x', np.dtype([('xx', np.int8),
('yy', np.bool_)])),
('y', np.int16)])
ty = pa.struct([pa.field('x', pa.struct([pa.field('xx', pa.int8()),
pa.field('yy', pa.bool_())])),
pa.field('y', pa.int16())])
data = np.array([], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == []
data = np.array([((1, True), 2), ((3, False), 4)], dtype=dt)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [{'x': {'xx': 1, 'yy': True}, 'y': 2},
{'x': {'xx': 3, 'yy': False}, 'y': 4}]
@pytest.mark.large_memory
def test_from_numpy_large(self):
# Exercise rechunking + nulls
        target_size = 3 * 1024**3  # 3GB
dt = np.dtype([('x', np.float64), ('y', 'object')])
bs = 65536 - dt.itemsize
block = b'.' * bs
n = target_size // (bs + dt.itemsize)
data = np.zeros(n, dtype=dt)
data['x'] = np.random.random_sample(n)
data['y'] = block
# Add implicit nulls
data['x'][data['x'] < 0.2] = np.nan
ty = pa.struct([pa.field('x', pa.float64()),
pa.field('y', pa.binary(bs))])
arr = pa.array(data, type=ty, from_pandas=True)
assert arr.num_chunks == 2
def iter_chunked_array(arr):
for chunk in arr.iterchunks():
for item in chunk:
yield item
def check(arr, data, mask=None):
assert len(arr) == len(data)
xs = data['x']
ys = data['y']
for i, obj in enumerate(iter_chunked_array(arr)):
try:
d = obj.as_py()
if mask is not None and mask[i]:
assert d is None
else:
x = xs[i]
if np.isnan(x):
assert d['x'] is None
else:
assert d['x'] == x
assert d['y'] == ys[i]
except Exception:
print("Failed at index", i)
raise
check(arr, data)
del arr
# Now with explicit mask
mask = np.random.random_sample(n) < 0.2
arr = pa.array(data, type=ty, mask=mask, from_pandas=True)
assert arr.num_chunks == 2
check(arr, data, mask)
del arr
def test_from_numpy_bad_input(self):
ty = pa.struct([pa.field('x', pa.int32()),
pa.field('y', pa.bool_())])
dt = np.dtype([('x', np.int32),
('z', np.bool_)])
data = np.array([], dtype=dt)
with pytest.raises(TypeError,
match="Missing field 'y'"):
pa.array(data, type=ty)
data = np.int32([])
with pytest.raises(TypeError,
match="Expected struct array"):
pa.array(data, type=ty)
class TestZeroCopyConversion(object):
"""
Tests that zero-copy conversion works with some types.
"""
def test_zero_copy_success(self):
result = pa.array([0, 1, 2]).to_pandas(zero_copy_only=True)
npt.assert_array_equal(result, [0, 1, 2])
def test_zero_copy_dictionaries(self):
arr = pa.DictionaryArray.from_arrays(
np.array([0, 0]),
np.array([5]))
result = arr.to_pandas(zero_copy_only=True)
values = pd.Categorical([5, 5])
tm.assert_series_equal(pd.Series(result), pd.Series(values),
check_names=False)
def check_zero_copy_failure(self, arr):
with pytest.raises(pa.ArrowInvalid):
arr.to_pandas(zero_copy_only=True)
def test_zero_copy_failure_on_object_types(self):
self.check_zero_copy_failure(pa.array(['A', 'B', 'C']))
def test_zero_copy_failure_with_int_when_nulls(self):
self.check_zero_copy_failure(pa.array([0, 1, None]))
def test_zero_copy_failure_with_float_when_nulls(self):
self.check_zero_copy_failure(pa.array([0.0, 1.0, None]))
def test_zero_copy_failure_on_bool_types(self):
self.check_zero_copy_failure(pa.array([True, False]))
def test_zero_copy_failure_on_list_types(self):
arr = pa.array([[1, 2], [8, 9]], type=pa.list_(pa.int64()))
self.check_zero_copy_failure(arr)
def test_zero_copy_failure_on_timestamp_types(self):
arr = np.array(['2007-07-13'], dtype='datetime64[ns]')
self.check_zero_copy_failure(pa.array(arr))
# This function must be at the top-level for Python 2.7's multiprocessing
def _threaded_conversion():
df = _alltypes_example()
_check_pandas_roundtrip(df, use_threads=True)
_check_pandas_roundtrip(df, use_threads=True, as_batch=True)
class TestConvertMisc(object):
"""
Miscellaneous conversion tests.
"""
type_pairs = [
(np.int8, pa.int8()),
(np.int16, pa.int16()),
(np.int32, pa.int32()),
(np.int64, pa.int64()),
(np.uint8, pa.uint8()),
(np.uint16, pa.uint16()),
(np.uint32, pa.uint32()),
(np.uint64, pa.uint64()),
(np.float16, pa.float16()),
(np.float32, pa.float32()),
(np.float64, pa.float64()),
# XXX unsupported
# (np.dtype([('a', 'i2')]), pa.struct([pa.field('a', pa.int16())])),
(np.object, pa.string()),
(np.object, pa.binary()),
(np.object, pa.binary(10)),
(np.object, pa.list_(pa.int64())),
]
def test_all_none_objects(self):
df = pd.DataFrame({'a': [None, None, None]})
_check_pandas_roundtrip(df)
def test_all_none_category(self):
df = pd.DataFrame({'a': [None, None, None]})
df['a'] = df['a'].astype('category')
_check_pandas_roundtrip(df)
def test_empty_arrays(self):
for dtype, pa_type in self.type_pairs:
arr = np.array([], dtype=dtype)
_check_array_roundtrip(arr, type=pa_type)
def test_threaded_conversion(self):
_threaded_conversion()
def test_threaded_conversion_multiprocess(self):
# Parallel conversion should work from child processes too (ARROW-2963)
pool = mp.Pool(2)
try:
pool.apply(_threaded_conversion)
finally:
pool.close()
pool.join()
def test_category(self):
repeats = 5
v1 = ['foo', None, 'bar', 'qux', np.nan]
v2 = [4, 5, 6, 7, 8]
v3 = [b'foo', None, b'bar', b'qux', np.nan]
df = pd.DataFrame({'cat_strings': pd.Categorical(v1 * repeats),
'cat_ints': pd.Categorical(v2 * repeats),
'cat_binary': pd.Categorical(v3 * repeats),
'cat_strings_ordered': pd.Categorical(
v1 * repeats, categories=['bar', 'qux', 'foo'],
ordered=True),
'ints': v2 * repeats,
'ints2': v2 * repeats,
'strings': v1 * repeats,
'strings2': v1 * repeats,
'strings3': v3 * repeats})
_check_pandas_roundtrip(df)
arrays = [
pd.Categorical(v1 * repeats),
pd.Categorical(v2 * repeats),
pd.Categorical(v3 * repeats)
]
for values in arrays:
_check_array_roundtrip(values)
def test_empty_category(self):
# ARROW-2443
df = pd.DataFrame({'cat': pd.Categorical([])})
_check_pandas_roundtrip(df)
def test_mixed_types_fails(self):
data = pd.DataFrame({'a': ['a', 1, 2.0]})
with pytest.raises(pa.ArrowTypeError):
pa.Table.from_pandas(data)
data = pd.DataFrame({'a': [1, True]})
with pytest.raises(pa.ArrowTypeError):
pa.Table.from_pandas(data)
data = pd.DataFrame({'a': ['a', 1, 2.0]})
expected_msg = 'Conversion failed for column a'
with pytest.raises(pa.ArrowTypeError, match=expected_msg):
pa.Table.from_pandas(data)
def test_strided_data_import(self):
cases = []
columns = ['a', 'b', 'c']
N, K = 100, 3
random_numbers = np.random.randn(N, K).copy() * 100
numeric_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',
'f4', 'f8']
for type_name in numeric_dtypes:
cases.append(random_numbers.astype(type_name))
# strings
cases.append(np.array([tm.rands(10) for i in range(N * K)],
dtype=object)
.reshape(N, K).copy())
# booleans
boolean_objects = (np.array([True, False, True] * N, dtype=object)
.reshape(N, K).copy())
# add some nulls, so dtype comes back as objects
boolean_objects[5] = None
cases.append(boolean_objects)
cases.append(np.arange("2016-01-01T00:00:00.001", N * K,
dtype='datetime64[ms]')
.reshape(N, K).copy())
strided_mask = (random_numbers > 0).astype(bool)[:, 0]
for case in cases:
df = pd.DataFrame(case, columns=columns)
col = df['a']
_check_pandas_roundtrip(df)
_check_array_roundtrip(col)
_check_array_roundtrip(col, mask=strided_mask)
def test_all_nones(self):
def _check_series(s):
converted = pa.array(s)
assert isinstance(converted, pa.NullArray)
assert len(converted) == 3
assert converted.null_count == 3
assert converted[0] is pa.NA
_check_series(pd.Series([None] * 3, dtype=object))
_check_series(pd.Series([np.nan] * 3, dtype=object))
_check_series(pd.Series([np.sqrt(-1)] * 3, dtype=object))
def test_partial_schema(self):
data = OrderedDict([
('a', [0, 1, 2, 3, 4]),
('b', np.array([-10, -5, 0, 5, 10], dtype=np.int32)),
('c', [-10, -5, 0, 5, 10])
])
df = pd.DataFrame(data)
partial_schema = pa.schema([
pa.field('a', pa.int64()),
pa.field('b', pa.int32())
])
expected_schema = pa.schema([
pa.field('a', pa.int64()),
pa.field('b', pa.int32()),
pa.field('c', pa.int64())
])
_check_pandas_roundtrip(df, schema=partial_schema,
expected_schema=expected_schema)
def test_table_batch_empty_dataframe(self):
df = pd.DataFrame({})
_check_pandas_roundtrip(df)
_check_pandas_roundtrip(df, as_batch=True)
df2 = pd.DataFrame({}, index=[0, 1, 2])
_check_pandas_roundtrip(df2, preserve_index=True)
_check_pandas_roundtrip(df2, as_batch=True, preserve_index=True)
def test_convert_empty_table(self):
arr = pa.array([], type=pa.int64())
tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=np.int64))
arr = pa.array([], type=pa.string())
tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object))
arr = pa.array([], type=pa.list_(pa.int64()))
tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object))
arr = pa.array([], type=pa.struct([pa.field('a', pa.int64())]))
tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object))
def test_non_natural_stride(self):
"""
ARROW-2172: converting from a Numpy array with a stride that's
not a multiple of itemsize.
"""
dtype = np.dtype([('x', np.int32), ('y', np.int16)])
data = np.array([(42, -1), (-43, 2)], dtype=dtype)
assert data.strides == (6,)
arr = pa.array(data['x'], type=pa.int32())
assert arr.to_pylist() == [42, -43]
arr = pa.array(data['y'], type=pa.int16())
assert arr.to_pylist() == [-1, 2]
def test_mixed_integer_columns(self):
row = [[], []]
df = pd.DataFrame(data=[row], columns=['foo', 123])
expected_df = pd.DataFrame(data=[row], columns=['foo', '123'])
_check_pandas_roundtrip(df, expected=expected_df, preserve_index=True)
def _fully_loaded_dataframe_example():
from distutils.version import LooseVersion
index = pd.MultiIndex.from_arrays([
pd.date_range('2000-01-01', periods=5).repeat(2),
np.tile(np.array(['foo', 'bar'], dtype=object), 5)
])
c1 = pd.date_range('2000-01-01', periods=10)
data = {
0: c1,
1: c1.tz_localize('utc'),
2: c1.tz_localize('US/Eastern'),
3: c1[::2].tz_localize('utc').repeat(2).astype('category'),
4: ['foo', 'bar'] * 5,
5: pd.Series(['foo', 'bar'] * 5).astype('category').values,
6: [True, False] * 5,
7: np.random.randn(10),
8: np.random.randint(0, 100, size=10),
9: pd.period_range('2013', periods=10, freq='M')
}
if LooseVersion(pd.__version__) >= '0.21':
# There is an issue with pickling IntervalIndex in pandas 0.20.x
data[10] = pd.interval_range(start=1, freq=1, periods=10)
return pd.DataFrame(data, index=index)
@pytest.mark.parametrize('columns', ([b'foo'], ['foo']))
def test_roundtrip_with_bytes_unicode(columns):
df = pd.DataFrame(columns=columns)
table1 = pa.Table.from_pandas(df)
table2 = pa.Table.from_pandas(table1.to_pandas())
assert table1.equals(table2)
assert table1.schema.equals(table2.schema)
assert table1.schema.metadata == table2.schema.metadata
def _check_serialize_components_roundtrip(df):
ctx = pa.default_serialization_context()
components = ctx.serialize(df).to_components()
deserialized = ctx.deserialize_components(components)
tm.assert_frame_equal(df, deserialized)
def test_serialize_deserialize_pandas():
# ARROW-1784, serialize and deserialize DataFrame by decomposing
# BlockManager
df = _fully_loaded_dataframe_example()
_check_serialize_components_roundtrip(df)
def _pytime_from_micros(val):
microseconds = val % 1000000
val //= 1000000
seconds = val % 60
val //= 60
minutes = val % 60
hours = val // 60
return time(hours, minutes, seconds, microseconds)
def _pytime_to_micros(pytime):
return (pytime.hour * 3600000000 +
pytime.minute * 60000000 +
pytime.second * 1000000 +
pytime.microsecond)
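# Illustrative sanity check (not part of the test suite): the two helpers above are
# intended to be inverses of each other for any microsecond count within one day.
# For example, 45296000007 corresponds to 12:34:56.000007:
#
#   val = 45296000007
#   assert _pytime_to_micros(_pytime_from_micros(val)) == val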
def test_convert_unsupported_type_error_message():
# ARROW-1454
df = pd.DataFrame({
't1': pd.date_range('2000-01-01', periods=20),
't2': pd.date_range('2000-05-01', periods=20)
})
# timedelta64 as yet unsupported
df['diff'] = df.t2 - df.t1
expected_msg = 'Conversion failed for column diff with type timedelta64'
with pytest.raises(pa.ArrowNotImplementedError, match=expected_msg):
pa.Table.from_pandas(df)
| apache-2.0 |
peterk87/sistr_cmd | setup.py | 1 | 1646 | from distutils.core import setup
from setuptools import find_packages
from sistr.version import __version__
classifiers = """
Development Status :: 4 - Beta
Environment :: Console
License :: OSI Approved :: Apache Software License
Intended Audience :: Science/Research
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Bio-Informatics
Programming Language :: Python
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: Implementation :: CPython
Operating System :: POSIX :: Linux
""".strip().split('\n')
setup(
name='sistr_cmd',
version=__version__,
packages=find_packages(exclude=['tests']),
url='https://github.com/phac-nml/sistr_cmd',
license='Apache 2.0',
author='Peter Kruczkiewicz',
author_email='[email protected]',
description=('Serovar predictions from Salmonella whole-genome sequence assemblies by determination of antigen gene'
                 ' and cgMLST gene alleles using BLAST. Mash MinHash can also be used for serovar prediction.'),
keywords='Salmonella serotyping genotyping cgMLST BLAST Mash MinHash',
classifiers=classifiers,
package_dir={'sistr':'sistr'},
include_package_data=True,
install_requires=[
'numpy>=1.11.1',
'pandas>=0.18.1',
'tables>=3.3.0',
'pycurl>=7.43.0',
'scipy>=1.1.0'
],
extras_require={
'test': ['pytest>=2.9.2',],
},
entry_points={
'console_scripts': [
'sistr=sistr.sistr_cmd:main',
],
},
)
| apache-2.0 |
belltailjp/scikit-learn | sklearn/manifold/t_sne.py | 106 | 20057 | # Author: Alexander Fabisch -- <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the standard t-SNE implementation. There are faster modifications of
# the algorithm:
# * Barnes-Hut-SNE: reduces the complexity of the gradient computation from
# N^2 to N log N (http://arxiv.org/abs/1301.3342)
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
conditional_P = _utils._binary_search_perplexity(
distances, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
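# A minimal sketch of the symmetrization step above (the toy 3x3 conditional matrix
# is an assumption for illustration only): the conditional probabilities are
# symmetrized, normalized by the total mass and stored in condensed form, with each
# pair of points appearing once.
#
#   conditional_P = np.array([[0.0, 0.7, 0.3],
#                             [0.4, 0.0, 0.6],
#                             [0.5, 0.5, 0.0]])
#   P = conditional_P + conditional_P.T
#   P = np.maximum(squareform(P) / np.maximum(np.sum(P), MACHINE_EPSILON),
#                  MACHINE_EPSILON)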
def _kl_divergence(params, P, alpha, n_samples, n_components):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
alpha : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= alpha
n **= (alpha + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (alpha + 1.0) / alpha
grad *= c
return kl_divergence, grad
def _gradient_descent(objective, p0, it, n_iter, n_iter_without_progress=30,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
grad_norm = linalg.norm(grad)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if min_grad_norm >= grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if min_error_diff >= error_diff:
if verbose >= 2:
print("[t-SNE] Iteration %d: error difference %f. Finished."
% (i + 1, error_diff))
break
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
        np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if verbose >= 2 and (i + 1) % 10 == 0:
print("[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
% (i + 1, error, grad_norm))
return p, error, i
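# Hedged usage sketch (not executed; the toy objective is an assumption for
# illustration, not part of t-SNE): any callable returning (cost, gradient)
# can be minimized with the helper above.
#
#   def _toy_objective(p):
#       return np.sum(p ** 2), 2.0 * p
#
#   p_opt, cost, it = _gradient_descent(_toy_objective, np.array([1.0, -2.0]),
#                                       it=0, n_iter=100, learning_rate=0.1)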
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
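# Illustrative example (assumes a small random dataset): embedding the data onto
# itself preserves every neighborhood, so the trustworthiness should be exactly 1.
#
#   X = np.random.RandomState(0).randn(20, 5)
#   assert trustworthiness(X, X, n_neighbors=3) == 1.0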
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 887.28..., 238.61...],
[ -714.79..., 3243.34...],
[ 957.30..., -2505.78...],
           [-1130.28...,  -974.78...]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
metric="euclidean", init="random", verbose=0,
random_state=None):
if init not in ["pca", "random"]:
raise ValueError("'init' must be either 'pca' or 'random'")
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric, squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
# Degrees of freedom of the Student's t-distribution. The suggestion
# alpha = n_components - 1 comes from "Learning a Parametric Embedding
# by Preserving Local Structure" Laurens van der Maaten, 2009.
alpha = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
self.training_data_ = X
P = _joint_probabilities(distances, self.perplexity, self.verbose)
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
self.embedding_ = self._tsne(P, alpha, n_samples, random_state,
X_embedded=X_embedded)
return self
def _tsne(self, P, alpha, n_samples, random_state, X_embedded=None):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
# Early exaggeration
P *= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=0, n_iter=50, momentum=0.5,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=100, momentum=0.8,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations with early "
"exaggeration: %f" % (it + 1, error))
# Final optimization
P /= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=self.n_iter,
momentum=0.8, learning_rate=self.learning_rate,
verbose=self.verbose, args=[P, alpha, n_samples,
self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
X_embedded = params.reshape(n_samples, self.n_components)
return X_embedded
def fit_transform(self, X, y=None):
"""Transform X to the embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self.fit(X)
return self.embedding_
| bsd-3-clause |
paladin74/neural-network-animation | matplotlib/tests/test_style.py | 10 | 1977 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import shutil
import tempfile
from contextlib import contextmanager
import matplotlib as mpl
from matplotlib import style
from matplotlib.style.core import USER_LIBRARY_PATHS, STYLE_EXTENSION
import six
PARAM = 'image.cmap'
VALUE = 'pink'
DUMMY_SETTINGS = {PARAM: VALUE}
@contextmanager
def temp_style(style_name, settings=None):
"""Context manager to create a style sheet in a temporary directory."""
    if settings is None:
        settings = DUMMY_SETTINGS
temp_file = '%s.%s' % (style_name, STYLE_EXTENSION)
# Write style settings to file in the temp directory.
tempdir = tempfile.mkdtemp()
with open(os.path.join(tempdir, temp_file), 'w') as f:
for k, v in six.iteritems(settings):
f.write('%s: %s' % (k, v))
# Add temp directory to style path and reload so we can access this style.
USER_LIBRARY_PATHS.append(tempdir)
style.reload_library()
try:
yield
finally:
shutil.rmtree(tempdir)
style.reload_library()
def test_available():
with temp_style('_test_', DUMMY_SETTINGS):
assert '_test_' in style.available
def test_use():
mpl.rcParams[PARAM] = 'gray'
with temp_style('test', DUMMY_SETTINGS):
with style.context('test'):
assert mpl.rcParams[PARAM] == VALUE
def test_use_url():
with temp_style('test', DUMMY_SETTINGS):
with style.context('https://gist.github.com/adrn/6590261/raw'):
assert mpl.rcParams['axes.facecolor'] == "#adeade"
def test_context():
mpl.rcParams[PARAM] = 'gray'
with temp_style('test', DUMMY_SETTINGS):
with style.context('test'):
assert mpl.rcParams[PARAM] == VALUE
# Check that this value is reset after the exiting the context.
assert mpl.rcParams[PARAM] == 'gray'
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
| mit |
alexsavio/hansel | hansel/crumb.py | 1 | 29611 | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Crumb class: the smart path model class.
"""
import os
import pathlib
import re
from collections import OrderedDict
from copy import deepcopy
from typing import List, Dict, Iterator, Tuple
from hansel._utils import (
_first_txt,
_build_path,
_arg_names,
_find_arg_depth,
_check,
_depth_names,
_depth_names_regexes,
_has_arg,
_is_crumb_arg,
_split_exists,
_split,
_touch,
has_crumbs,
is_valid,
)
from hansel.utils import (
list_subpaths,
fnmatch_filter,
regex_match_filter,
CrumbArgsSequence, CrumbArgsSequences)
class Crumb(object):
""" The crumb path model class.
Parameters
----------
crumb_path: str
A file or folder path with crumb arguments. See Examples.
ignore_list: sequence of str
A list of `fnmatch` patterns of filenames to be ignored.
regex: str
Choices: 'fnmatch', 're' or 're.ignorecase'
If 'fnmatch' will use fnmatch regular expressions to
match any expression you may have in a crumb argument.
If 're' will use re.match.
If 're.ignorecase' will use re.match and pass re.IGNORE_CASE to re.compile.
Examples
--------
>>> crumb = Crumb("{base_dir}/raw/{subject_id}/{session_id}/{modality}/{image}")
>>> cr = Crumb(os.path.join(os.path.expanduser('~'), '{user_folder}'))
"""
def __init__(self, crumb_path: str, ignore_list: List[str] = None, regex: str = 'fnmatch'):
self._path = _check(crumb_path)
self._argval = {} # what is the value of the argument in the current path, if any has been set.
self._re_method = regex
self._re_args = None
if ignore_list is None:
ignore_list = []
self._ignore = ignore_list
self._update()
def _update(self):
""" Clean up, parse the current crumb path and fill the internal
members for functioning."""
self._set_match_function()
def _set_match_function(self):
""" Update self._match_filter with a regular expression
matching function depending on the value of self._re_method."""
if self._re_method == 'fnmatch':
self._match_filter = fnmatch_filter
elif self._re_method == 're':
self._match_filter = regex_match_filter
elif self._re_method == 're.ignorecase':
self._match_filter = regex_match_filter
self._re_args = (re.IGNORECASE,)
else:
raise ValueError('Expected regex method value to be "fnmatch", "re" or "re.ignorecase"'
', got {}.'.format(self._re_method))
def is_valid(self, crumb_path: str = None) -> bool:
""" Return True if the `crumb_path` is a valid crumb path, False otherwise.
If `crumb_path` is None, will use `self.path` instead.
"""
if crumb_path is None:
crumb_path = self.path
return is_valid(crumb_path)
@property
def patterns(self):
""" Returns a dict with the arg_names as keys and regular expressions as values."""
return {arg: rgx for _, (arg, rgx) in _depth_names_regexes(self._path) if rgx}
def set_pattern(self, arg_name: str, arg_regex: str):
""" Set the pattern `arg_regex` to the given argument `arg_name`."""
if not _has_arg(self.path, arg_name):
raise KeyError('Crumb argument {} is not present in {}.'.format(arg_name, self))
self._path = _build_path(
self._path,
arg_values={},
with_regex=True,
regexes={arg_name: arg_regex}
)
def set_patterns(self, **kwargs):
""" Set the pattern to the given arguments as keywords. """
for arg, pat in kwargs.items():
self.set_pattern(arg, pat)
def clear_pattern(self, arg_name: str):
""" Clear the pattern of the given argument `arg_name`."""
self.set_pattern(arg_name, '')
def clear(self, arg_name: str):
""" Clear the value of the given argument `arg_name`."""
del self._argval[arg_name]
@property
def arg_values(self) -> Dict[str, str]:
""" Return a dict with the arg_names and values of the already replaced crumb arguments."""
return self._argval
@property
def path(self) -> str:
"""Return the current crumb path string."""
return _build_path(self._path, arg_values=self.arg_values, with_regex=True)
@path.setter
def path(self, value: str):
""" Set the current crumb path string and updates the internal members.
Parameters
----------
value: str
A file or folder path with crumb arguments. See Examples in class docstring.
"""
self._path = value
self._update()
def has_crumbs(self, crumb_path: str = None) -> bool:
""" Return True if the current path has open crumb arguments, False otherwise.
If `crumb_path` is None will test on `self.path` instead.
"""
if crumb_path is None:
crumb_path = self.path
return has_crumbs(crumb_path)
def _open_arg_items(self):
""" Return an iterator to the crumb _argidx items in `self` that have
not been replaced yet. In the same order as they appear in the crumb path.
Returns
-------
depth_args: generator of 2-tuple of int and str
For each item will return the depth index of the undefined crumb
argument and its name.
Note
----
I know that there is shorter/faster ways to program this but I wanted to maintain the
order of the arguments in argidx in the result of this function.
"""
for depth, arg_name in _depth_names(self.path):
yield depth, arg_name
def _last_open_arg(self):
""" Return the idx and name of the last (right-most) open argument."""
open_args = list(self._open_arg_items())
if not open_args:
return None, None
for dpth, arg in reversed(open_args):
return dpth, arg
def _first_open_arg(self):
""" Return the idx and name of the first (left-most) open argument."""
for dpth, arg in self._open_arg_items():
return dpth, arg
def _is_first_open_arg(self, arg_name: str) -> bool:
""" Return True if `arg_name` is the first open argument."""
# Take into account that self._argidx is OrderedDict
return arg_name == self._first_open_arg()[1]
def has_set(self, arg_name: str) -> bool:
""" Return True if the argument `arg_name` has been set to a
specific value, False if it is still a crumb argument."""
return arg_name not in set(self.open_args())
def open_args(self) -> Iterator[str]:
""" Return an iterator to the crumb argument names in `self`
that have not been replaced yet.
In the same order as they appear in the crumb path."""
for _, arg_name in self._open_arg_items():
yield arg_name
def all_args(self) -> Iterator[str]:
""" Return an iterator to all the crumb argument names in `self`,
first the open ones and then the replaced ones.
Returns
-------
crumb_args: set of str
"""
yield from _arg_names(self._path)
def copy(self, crumb: 'Crumb' = None) -> 'Crumb':
""" Return a deep copy of the given `crumb`.
If `crumb` is None will return a copy of self.
Parameters
----------
crumb: str or Crumb
Returns
-------
copy: Crumb
"""
if crumb is None:
crumb = self
if isinstance(crumb, Crumb):
nucr = Crumb(
crumb._path,
ignore_list=crumb._ignore,
regex=crumb._re_method
)
nucr._argval = deepcopy(crumb._argval)
return nucr
if isinstance(crumb, str):
return Crumb.from_path(crumb)
raise TypeError("Expected a Crumb or a str to copy, "
"got {}.".format(type(crumb)))
def isabs(self) -> bool:
""" Return True if the current crumb path has an absolute path,
False otherwise.
This means that its path is valid and starts with a `os.path.sep` character
or hard disk letter.
"""
subp = _first_txt(self.path)
return os.path.isabs(subp)
def abspath(self, first_is_basedir: bool = False) -> 'Crumb':
""" Return a copy of `self` with an absolute crumb path.
Add as prefix the absolute path to the current directory if
the current crumb is not absolute.
Parameters
----------
first_is_basedir: bool
If True and the current crumb path starts with a crumb argument and first_is_basedir,
the first argument will be replaced by the absolute path to the current dir,
otherwise the absolute path to the current dir will be added as a prefix.
Returns
-------
abs_crumb: Crumb
"""
nucr = self.copy()
if not nucr.isabs():
nucr._path = self._abspath(first_is_basedir=first_is_basedir)
return nucr
def _abspath(self, first_is_basedir: bool = False) -> str:
""" Return the absolute path of the current crumb path.
Parameters
----------
first_is_basedir: bool
If True and the current crumb path starts with a crumb argument and first_is_basedir,
the first argument will be replaced by the absolute path to the current dir,
otherwise the absolute path to the current dir will be added as a prefix.
Returns
-------
abspath: str
"""
if os.path.isabs(self._path):
return self._path
splits = self._path.split(os.path.sep)
basedir = [os.path.abspath(os.path.curdir)]
if _is_crumb_arg(splits[0]):
if first_is_basedir:
splits.pop(0)
basedir.extend(splits)
return os.path.sep.join(basedir)
def split(self) -> Tuple[str, str]:
""" Split `crumb_path` in two parts, the first is the base folder without
any crumb argument and the second is the rest of `crumb_path` beginning
with the first crumb argument.
If `crumb_path` starts with an argument, will return ('', crumb_path).
"""
return _split(self.path)
@classmethod
def from_path(cls, crumb_path: [str, 'Crumb', pathlib.Path]) -> 'Crumb':
""" Create an instance of Crumb out of `crumb_path`.
Parameters
----------
        crumb_path: str or Crumb or pathlib.Path
Returns
-------
path: Crumb
"""
if isinstance(crumb_path, Crumb):
return crumb_path.copy()
elif isinstance(crumb_path, pathlib.Path):
return cls(str(crumb_path))
elif isinstance(crumb_path, str):
return cls(crumb_path)
else:
raise TypeError("Expected a `val` to be a `str`, got {}.".format(type(crumb_path)))
def _arg_values(self, arg_name: str, arg_values: CrumbArgsSequence = None) -> CrumbArgsSequences:
""" Return the existing values in the file system for the crumb argument
with name `arg_name`.
The `arg_values` must be a sequence with the tuples with valid values of the dependent
(previous in the path) crumb arguments.
The format of `arg_values` work in such a way that `self._path.format(dict(arg_values[0]))`
would give me a valid path or crumb.
Parameters
----------
arg_name: str
arg_values: list of tuples
Returns
-------
vals: list of tuples
Raises
------
ValueError: if `arg_values` is None and `arg_name` is not the
first crumb argument in self._path
AttributeError: if the path is not absolute
IOError: if this crosses to any path that is non-existing.
"""
# if arg_name is not None and arg_values is None:
# if arg_name in self.arg_values:
# return [[(arg_name, self.arg_values[arg_name])]]
if arg_values is None and not self._is_first_open_arg(arg_name):
raise ValueError("Cannot get the list of values for {} if"
" the previous arguments are not filled"
" in `paths`.".format(arg_name))
path = self.path
dpth, arg_name, arg_regex = _find_arg_depth(path, arg_name)
splt = path.split(os.path.sep)
if dpth == len(splt) - 1: # this means we have to list files too
just_dirs = False
else: # this means we have to list folders
just_dirs = True
if arg_values is None:
vals = self._arg_values_from_base(
basedir=os.path.sep.join(splt[:dpth]),
arg_name=arg_name,
arg_regex=arg_regex,
just_dirs=just_dirs
)
else:
vals = self._extend_arg_values(
arg_values=arg_values,
arg_name=arg_name,
arg_regex=arg_regex,
just_dirs=just_dirs
)
return vals
def _extend_arg_values(
self,
arg_values: CrumbArgsSequence,
arg_name: str,
arg_regex: str,
just_dirs: bool
) -> CrumbArgsSequences:
""" Return an extended copy of `arg_values` with valid values for `arg_name`."""
path = self.path
vals = []
for aval in arg_values:
# create the part of the crumb path that is already specified
nupath = _split(_build_path(path, arg_values=dict(aval)))[0]
# THIS HAPPENS, LEAVE IT. TODO: make a test for this line
if not os.path.exists(nupath):
continue
paths = list_subpaths(
nupath,
just_dirs=just_dirs,
ignore=self._ignore,
pattern=arg_regex,
filter_func=self._match_filter
)
# extend `val` tuples with the new list of values for `aval`
vals.extend([aval + [(arg_name, sp)] for sp in paths])
return vals
def _arg_values_from_base(self, basedir: str, arg_name: str, arg_regex: str, just_dirs: bool) -> CrumbArgsSequences:
""" Return a map of arg values for `arg_name` from the `basedir`."""
vals = list_subpaths(basedir,
just_dirs=just_dirs,
ignore=self._ignore,
pattern=arg_regex,
filter_func=self._match_filter,
filter_args=self._re_args)
return [[(arg_name, val)] for val in vals]
def _check_args(self, arg_names: Iterator[str], self_args: Iterator[str]):
""" Raise a ValueError if `self_args` is empty.
Raise a KeyError if `arg_names` is not a subset of `self_args`.
"""
anames = set(arg_names)
aself = set(self_args)
if not anames and not aself:
return
if not aself or aself is None:
raise AttributeError('This Crumb has no remaining arguments: {}.'.format(self.path))
if not anames.issubset(aself):
raise KeyError("Expected `arg_names` to be a subset of ({}),"
" got {}.".format(list(aself), anames))
def _check_open_args(self, arg_names: Iterator[str]):
""" Raise a KeyError if any of the arguments in `arg_names` is not a crumb
argument name in `self.path`.
Parameters
----------
arg_names: sequence of str
Names of crumb arguments
Raises
------
KeyError
"""
self._check_args(arg_names, self_args=self.open_args())
def update(self, **kwargs) -> 'Crumb':
""" Set the crumb arguments in path to the given values in kwargs and update
self accordingly.
Parameters
----------
kwargs: strings
Returns
-------
crumb: Crumb
"""
self._check_args(list(kwargs.keys()), self_args=self.all_args())
for k, v in kwargs.items():
if not isinstance(v, str):
raise ValueError("Expected a string for the value of argument {}, "
"got {}.".format(k, v))
path = _build_path(self.path, arg_values=kwargs, with_regex=True)
_check(path)
self._argval.update(**kwargs)
return self
def replace(self, **kwargs) -> 'Crumb':
""" Return a copy of self with the crumb arguments in
`kwargs` replaced by its values.
As an analogy to the `str.format` function this function could be called `format`.
Parameters
----------
kwargs: strings
Returns
-------
crumb:
"""
cr = self.copy(self)
return cr.update(**kwargs)
def _arg_parents(self, arg_name: str) -> Dict[str, int]:
""" Return a subdict with the open arguments name and index in `self._argidx`
that come before `arg_name` in the crumb path. Include `arg_name` himself.
Parameters
----------
arg_name:
Returns
-------
arg_deps:
"""
if arg_name not in self.arg_values:
path = self.path
else:
path = self._path
dpth, _, _ = _find_arg_depth(path, arg_name)
return OrderedDict([(arg, idx) for idx, arg in self._open_arg_items() if idx <= dpth])
def _args_open_parents(self, arg_names: Iterator[str]) -> Iterator[str]:
""" Return the name of the arguments that are dependencies of `arg_names`.
Parameters
----------
arg_names:
Returns
-------
rem_deps:
"""
started = False
arg_dads = []
for an in reversed(list(self.open_args())): # take into account that argidx is ordered
if an in arg_names:
started = True
else:
if started:
arg_dads.append(an)
return list(reversed(arg_dads))
def values_map(self, arg_name: str = '', check_exists: bool = False) -> CrumbArgsSequences:
""" Return a list of tuples of crumb arguments with their values from the
first argument until `arg_name`.
Parameters
----------
arg_name: str
If empty will pick the arg_name of the last open argument of the Crumb.
check_exists: bool
Returns
-------
values_map: list of lists of 2-tuples
I call values_map what is called `record` in pandas.
It is a list of lists of 2-tuples, where each 2-tuple
has the shape (arg_name, arg_value).
"""
if not arg_name:
_, arg_name = self._last_open_arg()
if arg_name is None:
return [list(self.arg_values.items())]
arg_deps = self._arg_parents(arg_name)
values_map = None
if arg_deps:
for arg in arg_deps:
values_map = self._arg_values(arg, values_map)
elif arg_name in self.arg_values:
values_map = [[(arg_name, self.arg_values[arg_name])]]
else: # this probably will never be reached.
raise ValueError('Could not build a map of values with '
'argument {}.'.format(arg_name))
return sorted(self._build_and_check(values_map) if check_exists else values_map)
def _build_and_check(self, values_map: CrumbArgsSequences) -> CrumbArgsSequences:
""" Return a values_map of arg_values that lead to existing crumb paths."""
paths = list(self.build_paths(values_map, make_crumbs=True))
yield from (args for args, path in zip(values_map, paths) if path.exists())
def build_paths(
self,
values_map: CrumbArgsSequences,
make_crumbs: bool = True
) -> [Iterator[str], Iterator['Crumb']]:
""" Return a list of paths from each tuple of args from `values_map`
Parameters
----------
values_map: list of sequences of 2-tuple
            Example: [[('subject_id', 'haensel'), ('candy', 'lollipop.png')],
[('subject_id', 'gretel'), ('candy', 'jujube.png')],
]
make_crumbs: bool
If `make_crumbs` is True will create a Crumb for
each element of the result.
Default: True.
Returns
-------
paths: list of str or list of Crumb
"""
if make_crumbs:
yield from (self.replace(**dict(val)) for val in values_map)
else:
yield from (_build_path(self.path, arg_values=dict(val)) for val in values_map)
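    # Hedged usage sketch (paths are made-up examples; no filesystem access is needed):
    #
    #   cr = Crumb("/data/{subject_id}/{candy}")
    #   vmap = [[('subject_id', 'haensel'), ('candy', 'lollipop.png')],
    #           [('subject_id', 'gretel'), ('candy', 'jujube.png')]]
    #   list(cr.build_paths(vmap, make_crumbs=False))
    #   # -> ['/data/haensel/lollipop.png', '/data/gretel/jujube.png']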
def ls(
self,
arg_name: str = '',
fullpath: bool = True,
make_crumbs: bool = True,
check_exists: bool = True
) -> [Iterator[str], Iterator['Crumb']]:
""" Return the list of values for the argument crumb `arg_name`.
This will also unfold any other argument crumb that appears before in the
path.
Parameters
----------
arg_name: str
Name of the argument crumb to be unfolded.
If empty will pick the arg_name of the last open argument of the Crumb.
`arg_name` can also contain file patterns in the same syntax as
the `regex` argument type used in the `__init__` of the object.
fullpath: bool
If True will build the full path of the crumb path, will also append
the rest of crumbs not unfolded.
If False will only return the values for the argument with name
`arg_name`.
make_crumbs: bool
If `fullpath` and `make_crumbs` is True will create a Crumb for
each element of the result.
check_exists: bool
If True will return only str, Crumb or Path if it exists
in the file path, otherwise it may create file paths
that don't have to exist.
Returns
-------
values
Examples
--------
>>> cr = Crumb(os.path.join(os.path.expanduser('~'), '{user_folder}'))
>>> user_folders = cr.ls('user_folder',fullpath=True,make_crumbs=True)
"""
if not arg_name and not fullpath:
raise ValueError('Expecting an `arg_name` if `fullpath` is False.')
if not arg_name:
_, arg_name = self._last_open_arg()
if arg_name is None:
arg_name = ''
# check if there is any regex in the arg_name, if True, set the pattern
# later check if the arg_name is correct
arg_regex = False
if arg_name:
_, (arg_name, arg_regex) = tuple(_depth_names_regexes('{' + arg_name + '}'))[0]
if arg_regex:
old_regex = self.patterns.get(arg_name, None)
self.set_pattern(arg_name=arg_name, arg_regex=arg_regex)
self._check_args([arg_name], self.all_args())
# build the paths or value maps
self._check_ls_params(make_crumbs, fullpath)
# make_crumbs only makes sense if fullpath is True
if not fullpath:
make_crumbs = fullpath
# create the grid of values for the arguments
values_map = self.values_map(arg_name, check_exists=check_exists)
if fullpath:
paths = self.build_paths(values_map, make_crumbs=make_crumbs)
else:
paths = (dict(val)[arg_name] for val in values_map)
# clear and set the old the pattern if it was set for this query
if arg_regex:
self.clear_pattern(arg_name=arg_name)
if old_regex is not None:
self.set_pattern(arg_name=arg_name, arg_regex=old_regex)
return sorted(paths)
def _check_ls_params(self, make_crumbs: bool, fullpath: bool):
""" Raise errors if the arguments are not good for ls function."""
# if the first chunk of the path is a parameter, I am not interested in this (for now)
# check if the path is absolute, if not raise an NotImplementedError
if not self.isabs() and self.path.startswith('{'):
raise NotImplementedError("Cannot list paths that start with an argument. "
"If this is a relative path, use the `abspath()` "
"member function.")
def touch(self, exist_ok: bool = True) -> str:
""" Create a leaf directory and all intermediate ones using the non
crumbed part of `crumb_path`.
If the target directory already exists, raise an IOError if exist_ok
is False. Otherwise no exception is raised.
Parameters
----------
crumb_path: str
exist_ok: bool
Default = True
Returns
-------
nupath: str
The new path created.
"""
return _touch(self.path, exist_ok=exist_ok)
def joinpath(self, suffix: str) -> 'Crumb':
""" Return a copy of the current crumb with the `suffix` path appended.
If suffix has crumb arguments, the whole crumb will be updated.
Parameters
----------
suffix: str
Returns
-------
cr: Crumb
"""
return Crumb(os.path.join(self.path, suffix))
def exists(self) -> bool:
""" Return True if the current crumb path is a possibly existing path,
False otherwise.
Returns
-------
exists: bool
"""
if not has_crumbs(self.path):
return os.path.exists(str(self)) or os.path.islink(str(self))
if not os.path.exists(self.split()[0]):
return False
_, last = self._last_open_arg()
paths = self.ls(last,
fullpath=True,
make_crumbs=False,
check_exists=False)
return any((_split_exists(lp) for lp in paths))
def has_files(self) -> bool:
""" Return True if the current crumb path has any file in its
possible paths.
Returns
-------
has_files: bool
"""
if not os.path.exists(self.split()[0]):
return False
_, last = self._last_open_arg()
paths = self.ls(
last,
fullpath=True,
make_crumbs=True,
check_exists=True
)
return any((os.path.isfile(str(lp)) for lp in paths))
def unfold(self) -> [List['Crumb'], Iterator[pathlib.Path]]:
""" Return a list of all the existing paths until the last crumb argument.
If there are no remaining open arguments,
Returns
-------
paths: list of pathlib.Path
"""
if list(self.open_args()):
return self.ls(
self._last_open_arg()[1],
fullpath=True,
make_crumbs=True,
check_exists=True
)
return [self]
def get_first(self, arg_name: str) -> str:
""" Return the first existing value of the crumb argument `arg_name`.
Parameters
----------
arg_name: str
Returns
-------
values: str
"""
return self[arg_name][0]
def __getitem__(self, arg_name):
""" Return the existing values of the crumb argument `arg_name`
without removing duplicates.
Parameters
----------
arg_name: str
Returns
-------
values: list of str
"""
if arg_name in self._argval:
return [self._argval[arg_name]]
else:
return self.ls(arg_name,
fullpath=False,
make_crumbs=False,
check_exists=True)
def __setitem__(self, key: str, value: str):
self.update(**{key: value})
def __ge__(self, other: 'Crumb') -> bool:
return self._path >= str(other)
def __le__(self, other: 'Crumb') -> bool:
return self._path <= str(other)
def __gt__(self, other: 'Crumb') -> bool:
return self._path > str(other)
def __lt__(self, other: 'Crumb') -> bool:
return self._path < str(other)
def __hash__(self) -> int:
return self._path.__hash__()
def __contains__(self, arg_name) -> bool:
return arg_name in self.all_args()
def __repr__(self) -> str:
return '{}("{}")'.format(type(self).__name__, self.path)
def __str__(self) -> str:
return self.path
def __eq__(self, other: 'Crumb') -> bool:
""" Return True if `self` and `other` are equal, False otherwise.
Parameters
----------
other: Crumb
Returns
-------
is_equal: bool
"""
if self._path != other._path:
return False
if self._argval != other._argval:
return False
if self._ignore != other._ignore:
return False
return True
| apache-2.0 |
nguyentu1602/statsmodels | statsmodels/sandbox/tsa/try_arma_more.py | 34 | 3744 | # -*- coding: utf-8 -*-
"""Periodograms for ARMA and time series
theoretical periodogram of ARMA process and different version
of periodogram estimation
uses scikits.talkbox and matplotlib
Created on Wed Oct 14 23:02:19 2009
Author: josef-pktd
"""
from __future__ import print_function
import numpy as np
from scipy import signal, ndimage
import matplotlib.mlab as mlb
import matplotlib.pyplot as plt
from statsmodels.tsa.arima_process import arma_generate_sample, arma_periodogram
from statsmodels.tsa.stattools import acovf
hastalkbox = False
try:
    import scikits.talkbox as stb
    import scikits.talkbox.spectral.basic as stbs
    hastalkbox = True
except ImportError:
    hastalkbox = False
ar = [1., -0.7]#[1,0,0,0,0,0,0,-0.7]
ma = [1., 0.3]
ar = np.convolve([1.]+[0]*50 +[-0.6], ar)
ar = np.convolve([1., -0.5]+[0]*49 +[-0.3], ar)
n_startup = 1000
nobs = 1000
# throwing away samples at beginning makes sample more "stationary"
xo = arma_generate_sample(ar,ma,n_startup+nobs)
x = xo[n_startup:]
#moved to tsa.arima_process
#def arma_periodogram(ar, ma, **kwds):
# '''periodogram for ARMA process given by lag-polynomials ar and ma
#
# Parameters
# ----------
# ar : array_like
# autoregressive lag-polynomial with leading 1 and lhs sign
# ma : array_like
# moving average lag-polynomial with leading 1
# kwds : options
# options for scipy.signal.freqz
# default: worN=None, whole=0
#
# Returns
# -------
# w : array
# frequencies
# sd : array
# periodogram, spectral density
#
# Notes
# -----
# Normalization ?
#
# '''
# w, h = signal.freqz(ma, ar, **kwds)
# sd = np.abs(h)**2/np.sqrt(2*np.pi)
# if np.sum(np.isnan(h)) > 0:
# # this happens with unit root or seasonal unit root'
# print 'Warning: nan in frequency response h'
# return w, sd
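# a minimal sketch of calling the imported version instead of the manual freqz
# computation below (keyword defaults are assumed to match scipy.signal.freqz):
#
#   wd, sdd = arma_periodogram(ar, ma)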
plt.figure()
plt.plot(x)
rescale = 0
w, h = signal.freqz(ma, ar)
sd = np.abs(h)**2/np.sqrt(2*np.pi)
if np.sum(np.isnan(h)) > 0:
    # this happens with unit root or seasonal unit root
print('Warning: nan in frequency response h')
h[np.isnan(h)] = 1.
rescale = 0
#replace with signal.order_filter ?
pm = ndimage.filters.maximum_filter(sd, footprint=np.ones(5))
maxind = np.nonzero(pm == sd)
print('local maxima frequencies')
wmax = w[maxind]
sdmax = sd[maxind]
plt.figure()
plt.subplot(2,3,1)
if rescale:
plt.plot(w, sd/sd[0], '-', wmax, sdmax/sd[0], 'o')
# plt.plot(w, sd/sd[0], '-')
# plt.hold()
# plt.plot(wmax, sdmax/sd[0], 'o')
else:
plt.plot(w, sd, '-', wmax, sdmax, 'o')
# plt.hold()
# plt.plot(wmax, sdmax, 'o')
plt.title('DGP')
sdm, wm = mlb.psd(x)
sdm = sdm.ravel()
pm = ndimage.filters.maximum_filter(sdm, footprint=np.ones(5))
maxind = np.nonzero(pm == sdm)
plt.subplot(2,3,2)
if rescale:
plt.plot(wm,sdm/sdm[0], '-', wm[maxind], sdm[maxind]/sdm[0], 'o')
else:
plt.plot(wm, sdm, '-', wm[maxind], sdm[maxind], 'o')
plt.title('matplotlib')
if hastalkbox:
sdp, wp = stbs.periodogram(x)
plt.subplot(2,3,3)
if rescale:
plt.plot(wp,sdp/sdp[0])
else:
plt.plot(wp, sdp)
plt.title('stbs.periodogram')
xacov = acovf(x, unbiased=False)
plt.subplot(2,3,4)
plt.plot(xacov)
plt.title('autocovariance')
nr = len(x)#*2/3
#xacovfft = np.fft.fft(xacov[:nr], 2*nr-1)
xacovfft = np.fft.fft(np.correlate(x,x,'full'))
#abs(xacovfft)**2 or equivalently
xacovfft = xacovfft * xacovfft.conj()
plt.subplot(2,3,5)
if rescale:
plt.plot(xacovfft[:nr]/xacovfft[0])
else:
plt.plot(xacovfft[:nr])
plt.title('fft')
if hastalkbox:
sdpa, wpa = stbs.arspec(x, 50)
plt.subplot(2,3,6)
if rescale:
plt.plot(wpa,sdpa/sdpa[0])
else:
plt.plot(wpa, sdpa)
plt.title('stbs.arspec')
#plt.show()
| bsd-3-clause |
benjello/openfisca-france-data | setup.py | 1 | 2540 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""OpenFisca -- a versatile microsimulation free software
OpenFisca includes a framework to simulate any tax and social system.
"""
from setuptools import setup, find_packages
classifiers = """\
Development Status :: 2 - Pre-Alpha
License :: OSI Approved :: GNU Affero General Public License v3
Operating System :: POSIX
Programming Language :: Python
Topic :: Scientific/Engineering :: Information Analysis
"""
doc_lines = __doc__.split('\n')
setup(
name = 'OpenFisca-France-Data',
version = '0.4.dev0',
author = 'OpenFisca Team',
author_email = '[email protected]',
classifiers = [classifier for classifier in classifiers.split('\n') if classifier],
description = doc_lines[0],
keywords = 'benefit microsimulation social tax',
license = 'http://www.fsf.org/licensing/licenses/agpl-3.0.html',
long_description = '\n'.join(doc_lines[2:]),
url = 'https://github.com/openfisca/openfisca-france-data',
data_files = [
('share/locale/fr/LC_MESSAGES', ['openfisca_france_data/i18n/fr/LC_MESSAGES/openfisca-france-data.mo']),
],
install_requires = [
'configparser',
'OpenFisca-Core >= 3.0.0, < 4.0',
'OpenFisca-France >= 4.0.0, < 5.0',
'OpenFisca-Survey-Manager[calmar] >= 0.4',
'pandas >= 0.19',
'tables', # Needed by pandas.HDFStore
'wquantiles >= 0.3' # To compute weighted quantiles
],
message_extractors = {
'openfisca_france_data': [
('**.py', 'python', None),
],
},
packages = find_packages(),
zip_safe = False,
)
| agpl-3.0 |
ruohoruotsi/Wavelet-Tree-Synth | nnet/autoencoder_basics.py | 1 | 2844 | from keras.layers import Input, Dense
from keras.models import Model
from keras import regularizers
from keras.datasets import mnist
import numpy as np
(x_train, _), (x_test, _) = mnist.load_data()
# this is the size of our encoded representations
encoding_dim = 32 # 32 floats -> compression of factor 24.5, assuming the input is 784 floats
# "encoded" is the encoded representation of the input
# encoded = Dense(encoding_dim, activation='relu')(input_img)
# encoded = Dense(encoding_dim, activation='relu',
# activity_regularizer=regularizers.activity_l1(10e-5))(input_img)
# "decoded" is the lossy reconstruction of the input
# decoded = Dense(784, activation='sigmoid')(encoded)
# this is our input placeholder
input_img = Input(shape=(784,))
encoded = Dense(128, activation='relu')(input_img)
encoded = Dense(64, activation='relu')(encoded)
encoded = Dense(32, activation='relu')(encoded)
decoded = Dense(64, activation='relu')(encoded)
decoded = Dense(128, activation='relu')(decoded)
decoded = Dense(784, activation='sigmoid')(decoded)  # final decoder layer: maps 128 -> 784 (the decoder is no longer a single layer as in the shallow version above)
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
print(x_train.shape)
print(x_test.shape)
autoencoder = Model(input=input_img, output=decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.fit(x_train, x_train,
nb_epoch=100,
batch_size=256,
shuffle=True,
validation_data=(x_test, x_test))
encoder = Model(input=input_img, output=encoded)
autoencoder.summary()
encoder.summary()
# for the shallow autoencoder
# encoded_input = Input(shape=(encoding_dim,))
# decoder = Model(input=encoded_input, output=decoded)
# decoder_layer = autoencoder.layers[-1]
# decoder = Model(input=encoded_input, output=decoder_layer(encoded_input))
# encode and decode some digits
# note that we take them from the *test* set
# encoded_imgs = encoder.predict(x_test)
# decoded_imgs = decoder.predict(encoded_imgs)
# for the deep autoencoder
decoded_imgs = autoencoder.predict(x_test)
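# Sketch (not in the original script): a standalone decoder for this deep
# autoencoder can be assembled by re-applying its last three Dense layers to
# a new 32-dimensional input; the negative layer indices below assume the
# 128 -> 64 -> 32 -> 64 -> 128 -> 784 architecture defined above.
encoded_input = Input(shape=(32,))
deco = autoencoder.layers[-3](encoded_input)
deco = autoencoder.layers[-2](deco)
deco = autoencoder.layers[-1](deco)
deep_decoder = Model(input=encoded_input, output=deco)
decoded_from_codes = deep_decoder.predict(encoder.predict(x_test))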
# use Matplotlib (don't ask)
import matplotlib.pyplot as plt
n = 10 # how many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
# display original
ax = plt.subplot(2, n, i + 1)
plt.imshow(x_test[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(decoded_imgs[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show() | gpl-2.0 |
theoryno3/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 14 | 8137 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed, [])
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
lbdreyer/cartopy | tools/download.py | 1 | 4670 | #!/usr/bin/env python
# (C) British Crown Copyright 2011 - 2012, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <http://www.gnu.org/licenses/>.
"""
This module provides a command-line tool for triggering the download of
the data used by various Feature instances.
For details on how to use this tool, execute it with the `-h` option:
python download.py -h
"""
from __future__ import print_function
import argparse
from cartopy.feature import Feature, GSHHSFeature, NaturalEarthFeature
from cartopy.crs import PlateCarree
import matplotlib.pyplot as plt
ALL_SCALES = ('110m', '50m', '10m')
FEATURE_DEFN_GROUPS = {
# Only need one GSHHS resolution because they *all* get downloaded
# from one file.
'gshhs': GSHHSFeature(scale='c'),
'physical': (
('physical', 'coastline', ALL_SCALES),
('physical', 'land', ALL_SCALES),
('physical', 'ocean', ALL_SCALES),
('physical', 'rivers_lake_centerlines', ALL_SCALES),
('physical', 'lakes', ALL_SCALES),
('physical', 'geography_regions_polys', ALL_SCALES),
('physical', 'geography_regions_points', ALL_SCALES),
('physical', 'geography_marine_polys', ALL_SCALES),
('physical', 'glaciated_areas', ALL_SCALES)
),
'cultural': (
('cultural', 'admin_0_countries', ALL_SCALES),
('cultural', 'admin_0_countries_lakes', ALL_SCALES),
('cultural', 'admin_0_sovereignty', ALL_SCALES),
('cultural', 'admin_0_boundary_lines_land', ALL_SCALES),
('cultural', 'urban_areas', ('50m', '10m')),
#('cultural', 'roads', '10m'), # ERROR in NE dataset?
('cultural', 'roads_north_america', '10m'),
('cultural', 'railroads', '10m'),
('cultural', 'railroads_north_america', '10m'),
),
'cultural-extra': (
('cultural', 'admin_0_map_units', '110m'),
('cultural', 'admin_0_scale_rank', '110m'),
('cultural', 'admin_0_tiny_countries', '110m'),
('cultural', 'admin_0_pacific_groupings', '110m'),
('cultural', 'admin_1_states_provinces_shp', '110m'),
('cultural', 'admin_1_states_provinces_lines', '110m'),
),
}
def download_features(group_names, hold):
plt.ion()
ax = plt.axes(projection=PlateCarree())
ax.set_global()
for group_name in group_names:
feature_defns = FEATURE_DEFN_GROUPS[group_name]
if isinstance(feature_defns, Feature):
features = [feature_defns]
else:
features = []
for category, name, scales in feature_defns:
if not isinstance(scales, tuple):
scales = (scales,)
for scale in scales:
features.append(NaturalEarthFeature(category, name, scale))
for feature in features:
ax.add_feature(feature)
plt.draw()
plt.ioff()
if hold:
plt.show()
if __name__ == '__main__':
def group_name(string):
if string not in FEATURE_DEFN_GROUPS:
msg = '{!r} is not a valid feature group (choose from {!s})'
msg = msg.format(string, list(FEATURE_DEFN_GROUPS.keys()))
raise argparse.ArgumentTypeError(msg)
return string
parser = argparse.ArgumentParser(description='Download feature datasets.')
parser.add_argument('group_names', nargs='*',
type=group_name,
metavar='GROUP_NAME',
help='Feature group name')
parser.add_argument('--hold', action='store_true',
help='keep the matplotlib window open')
parser.add_argument('--show', action='store_true',
help='show the list of valid feature group names')
args = parser.parse_args()
if args.show:
print('Feature group names:')
for name in sorted(FEATURE_DEFN_GROUPS.keys()):
print(' ', name)
elif not args.group_names:
parser.error('Please supply one or more feature group names.')
download_features(args.group_names, args.hold)
| lgpl-3.0 |
smartkit/COVITAS | V1.0_ngix:apache_mosquitto_RESTful_Solr_LIRE/octo-ninja/rest-pandas/OctoNinja/app/views.py | 1 | 1324 | # views.py
from rest_pandas import PandasView
from .models import TimeSeries
class TimeSeriesView(PandasView):
model = TimeSeries
# In response to get(), the underlying Django REST Framework ListAPIView
# will load the default queryset (self.model.objects.all()) and then pass
# it to the following function.
def filter_queryset(self, qs):
# At this point, you can filter queryset based on self.request or other
# settings (useful for limiting memory usage)
return qs
# Then, the included PandasSerializer will serialize the queryset into a
# simple list of dicts (using the DRF ModelSerializer). To customize
# which fields to include, subclass PandasSerializer and set the
# appropriate ModelSerializer options. Then, set the serializer_class
# property on the view to your PandasSerializer subclass.
# Next, the PandasSerializer will load the ModelSerializer result into a
# DataFrame and pass it to the following function on the view.
def transform_dataframe(self, dataframe):
# Here you can transform the dataframe based on self.request
# (useful for pivoting or computing statistics)
return dataframe
# Finally, the included Renderers will process the dataframe into one of
# the output formats below. | unlicense |
njwilson23/scipy | scipy/stats/_multivariate.py | 35 | 69253 | #
# Author: Joris Vankerschaver 2013
#
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.linalg
from scipy.misc import doccer
from scipy.special import gammaln, psi, multigammaln
from scipy._lib._util import check_random_state
__all__ = ['multivariate_normal', 'dirichlet', 'wishart', 'invwishart']
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
def _process_parameters(dim, mean, cov):
"""
Infer dimensionality from mean or covariance matrix, ensure that
mean and covariance are full vector resp. matrix.
"""
# Try to infer dimensionality
if dim is None:
if mean is None:
if cov is None:
dim = 1
else:
cov = np.asarray(cov, dtype=float)
if cov.ndim < 2:
dim = 1
else:
dim = cov.shape[0]
else:
mean = np.asarray(mean, dtype=float)
dim = mean.size
else:
if not np.isscalar(dim):
raise ValueError("Dimension of random variable must be a scalar.")
# Check input sizes and return full arrays for mean and cov if necessary
if mean is None:
mean = np.zeros(dim)
mean = np.asarray(mean, dtype=float)
if cov is None:
cov = 1.0
cov = np.asarray(cov, dtype=float)
if dim == 1:
mean.shape = (1,)
cov.shape = (1, 1)
if mean.ndim != 1 or mean.shape[0] != dim:
raise ValueError("Array 'mean' must be a vector of length %d." % dim)
if cov.ndim == 0:
cov = cov * np.eye(dim)
elif cov.ndim == 1:
cov = np.diag(cov)
elif cov.ndim == 2 and cov.shape != (dim, dim):
rows, cols = cov.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(cov.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'mean' is a vector of length %d.")
msg = msg % (str(cov.shape), len(mean))
raise ValueError(msg)
elif cov.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % cov.ndim)
return dim, mean, cov
def _process_quantiles(x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
return out
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""
Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
singular and what is its numerical matrix rank.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
return eps
def _pinv_1d(v, eps=1e-5):
"""
A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
class _PSD(object):
"""
Compute coordinated functions of a symmetric positive semidefinite matrix.
This class addresses two issues. Firstly it allows the pseudoinverse,
the logarithm of the pseudo-determinant, and the rank of the matrix
to be computed using one call to eigh instead of three.
Secondly it allows these functions to be computed in a way
that gives mutually compatible results.
All of the functions are computed with a common understanding as to
which of the eigenvalues are to be considered negligibly small.
The functions are designed to coordinate with scipy.linalg.pinvh()
but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().
Parameters
----------
M : array_like
Symmetric positive semidefinite matrix (2-D).
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower
or upper triangle of M. (Default: lower)
check_finite : bool, optional
Whether to check that the input matrices contain only finite
numbers. Disabling may give a performance gain, but may result
in problems (crashes, non-termination) if the inputs do contain
infinities or NaNs.
allow_singular : bool, optional
Whether to allow a singular matrix. (Default: True)
Notes
-----
The arguments are similar to those of scipy.linalg.pinvh().
"""
def __init__(self, M, cond=None, rcond=None, lower=True,
check_finite=True, allow_singular=True):
# Compute the symmetric eigendecomposition.
# Note that eigh takes care of array conversion, chkfinite,
# and assertion that the matrix is square.
s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)
eps = _eigvalsh_to_eps(s, cond, rcond)
if np.min(s) < -eps:
raise ValueError('the input matrix must be positive semidefinite')
d = s[s > eps]
if len(d) < len(s) and not allow_singular:
raise np.linalg.LinAlgError('singular matrix')
s_pinv = _pinv_1d(s, eps)
U = np.multiply(u, np.sqrt(s_pinv))
# Initialize the eagerly precomputed attributes.
self.rank = len(d)
self.U = U
self.log_pdet = np.sum(np.log(d))
# Initialize an attribute to be lazily computed.
self._pinv = None
@property
def pinv(self):
if self._pinv is None:
self._pinv = np.dot(self.U, self.U.T)
return self._pinv
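def _psd_demo():
    """A minimal illustrative sketch (not part of the original module): for a
    rank-deficient positive semidefinite matrix, _PSD exposes the rank, the
    log pseudo-determinant and the pseudo-inverse from a single eigh call,
    consistent with scipy.linalg.pinvh applied to the same matrix.
    """
    M = np.array([[2., 0.], [0., 0.]])  # PSD with rank 1
    psd = _PSD(M, allow_singular=True)
    assert psd.rank == 1
    assert np.allclose(psd.log_pdet, np.log(2.))
    assert np.allclose(psd.pinv, scipy.linalg.pinvh(M))
    return psd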
_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
Whether to allow a singular covariance matrix. (Default: False)
"""
_doc_callparams_note = \
"""Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, or a two-dimensional
array_like.
"""
_doc_random_state = """\
random_state : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates.
If None (or np.random), the global np.random state is used.
Default is None.
"""
_doc_frozen_callparams = ""
_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
docdict_params = {
'_doc_default_callparams': _doc_default_callparams,
'_doc_callparams_note': _doc_callparams_note,
'_doc_random_state': _doc_random_state
}
docdict_noparams = {
'_doc_default_callparams': _doc_frozen_callparams,
'_doc_callparams_note': _doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multi_rv_generic(object):
"""
Class which encapsulates common functionality between all multivariate
distributions.
"""
def __init__(self, seed=None):
super(multi_rv_generic, self).__init__()
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _get_random_state(self, random_state):
if random_state is not None:
return check_random_state(random_state)
else:
return self._random_state
class multi_rv_frozen(object):
"""
Class which encapsulates common functionality between all frozen
multivariate distributions.
"""
@property
def random_state(self):
return self._dist._random_state
@random_state.setter
def random_state(self, seed):
self._dist._random_state = check_random_state(seed)
class multivariate_normal_gen(multi_rv_generic):
r"""
A multivariate normal random variable.
The `mean` keyword specifies the mean. The `cov` keyword specifies the
covariance matrix.
Methods
-------
``pdf(x, mean=None, cov=1, allow_singular=False)``
Probability density function.
``logpdf(x, mean=None, cov=1, allow_singular=False)``
Log of the probability density function.
``rvs(mean=None, cov=1, size=1, random_state=None)``
Draw random samples from a multivariate normal distribution.
``entropy()``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" multivariate normal
random variable:
rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_doc_callparams_note)s
The covariance matrix `cov` must be a (symmetric) positive
semi-definite matrix. The determinant and inverse of `cov` are computed
as the pseudo-determinant and pseudo-inverse, respectively, so
that `cov` does not need to have full rank.
The probability density function for `multivariate_normal` is
.. math::
f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
\exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
and :math:`k` is the dimension of the space where :math:`x` takes values.
.. versionadded:: 0.14.0
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_normal
>>> x = np.linspace(0, 5, 10, endpoint=False)
>>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129,
0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349])
>>> fig1 = plt.figure()
>>> ax = fig1.add_subplot(111)
>>> ax.plot(x, y)
The input quantiles can be any shape of array, as long as the last
axis labels the components. This allows us for instance to
display the frozen pdf for a non-isotropic random variable in 2D as
follows:
>>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
>>> pos = np.dstack((x, y))
>>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
>>> fig2 = plt.figure()
>>> ax2 = fig2.add_subplot(111)
>>> ax2.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
super(multivariate_normal_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, docdict_params)
def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
See `multivariate_normal_frozen` for more information.
"""
return multivariate_normal_frozen(mean, cov,
allow_singular=allow_singular,
seed=seed)
def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
prec_U : ndarray
A decomposition such that np.dot(prec_U, prec_U.T)
is the precision matrix, i.e. inverse of the covariance matrix.
log_det_cov : float
Logarithm of the determinant of the covariance matrix
rank : int
Rank of the covariance matrix.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
dev = x - mean
maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
def logpdf(self, x, mean, cov, allow_singular=False):
"""
Log of the multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
x = _process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
return _squeeze_output(out)
def pdf(self, x, mean, cov, allow_singular=False):
"""
Multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
x = _process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
return _squeeze_output(out)
def rvs(self, mean=None, cov=1, size=1, random_state=None):
"""
Draw random samples from a multivariate normal distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
random_state = self._get_random_state(random_state)
out = random_state.multivariate_normal(mean, cov, size)
return _squeeze_output(out)
def entropy(self, mean=None, cov=1):
"""
Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
_, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
return 0.5 * logdet
multivariate_normal = multivariate_normal_gen()
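def _mvn_singular_demo():
    """A minimal illustrative sketch (not part of the original module):
    evaluating a degenerate (rank-1) bivariate normal density requires
    allow_singular=True, in which case the pseudo-determinant and
    pseudo-inverse described in the class notes are used.
    """
    cov = np.array([[1., 1.], [1., 1.]])  # singular covariance, rank 1
    x = np.array([0.5, 0.5])  # lies in the support (the span of [1, 1])
    return multivariate_normal.pdf(x, mean=[0., 0.], cov=cov,
                                   allow_singular=True)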
class multivariate_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
Parameters
----------
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
If this flag is True then tolerate a singular
covariance matrix (default False).
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
Examples
--------
When called with the default parameters, this will create a 1D random
variable with mean 0 and covariance 1:
>>> from scipy.stats import multivariate_normal
>>> r = multivariate_normal()
>>> r.mean
array([ 0.])
>>> r.cov
array([[1.]])
"""
self.dim, self.mean, self.cov = _process_parameters(None, mean, cov)
self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
self._dist = multivariate_normal_gen(seed)
def logpdf(self, x):
x = _process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.mean, self.cov_info.U,
self.cov_info.log_pdet, self.cov_info.rank)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.cov, size, random_state)
def entropy(self):
"""
Computes the differential entropy of the multivariate normal.
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
"""
log_pdet = self.cov_info.log_pdet
rank = self.cov_info.rank
return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = multivariate_normal_gen.__dict__[name]
method_frozen = multivariate_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__, docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, docdict_params)
_dirichlet_doc_default_callparams = """\
alpha : array_like
The concentration parameters. The number of entries determines the
dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
'_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
'_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
alpha = np.asarray(alpha)
if np.min(alpha) <= 0:
raise ValueError("All parameters must be greater than 0")
elif alpha.ndim != 1:
raise ValueError("Parameter vector 'a' must be one dimensional, "
"but a.shape = %s." % (alpha.shape, ))
return alpha
def _dirichlet_check_input(alpha, x):
x = np.asarray(x)
if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
raise ValueError("Vector 'x' must have either the same number "
"of entries as, or one entry fewer than, "
"parameter vector 'a', but alpha.shape = %s "
"and x.shape = %s." % (alpha.shape, x.shape))
if x.shape[0] != alpha.shape[0]:
xk = np.array([1 - np.sum(x, 0)])
if xk.ndim == 1:
x = np.append(x, xk)
elif xk.ndim == 2:
x = np.vstack((x, xk))
else:
raise ValueError("The input must be one dimensional or a two "
"dimensional matrix containing the entries.")
if np.min(x) <= 0:
raise ValueError("Each entry in 'x' must be greater than zero.")
if np.max(x) > 1:
raise ValueError("Each entry in 'x' must be smaller or equal one.")
if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
raise ValueError("The input vector 'x' must lie within the normal "
"simplex. but np.sum(x, 0) = %s." % np.sum(x, 0))
return x
def _lnB(alpha):
r"""
Internal helper function to compute the log of the useful quotient
.. math::
        B(\alpha) = \frac{\prod_{i=1}^{K}\Gamma(\alpha_i)}{\Gamma\left(\sum_{i=1}^{K}\alpha_i\right)}
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
B : scalar
Helper quotient, internal use only
"""
return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
class dirichlet_gen(multi_rv_generic):
r"""
A Dirichlet random variable.
The `alpha` keyword specifies the concentration parameters of the
distribution.
.. versionadded:: 0.15.0
Methods
-------
``pdf(x, alpha)``
Probability density function.
``logpdf(x, alpha)``
Log of the probability density function.
``rvs(alpha, size=1, random_state=None)``
Draw random samples from a Dirichlet distribution.
``mean(alpha)``
The mean of the Dirichlet distribution
``var(alpha)``
The variance of the Dirichlet distribution
``entropy(alpha)``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix
concentration parameters, returning a "frozen" Dirichlet
random variable:
rv = dirichlet(alpha)
- Frozen object with the same methods but holding the given
concentration parameters fixed.
Notes
-----
Each :math:`\alpha` entry must be positive. The distribution has only
support on the simplex defined by
.. math::
\sum_{i=1}^{K} x_i \le 1
The probability density function for `dirichlet` is
.. math::
f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}
where
.. math::
\mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
{\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}
and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the
concentration parameters and :math:`K` is the dimension of the space
where :math:`x` takes values.
Note that the dirichlet interface is somewhat inconsistent.
The array returned by the rvs function is transposed
with respect to the format expected by the pdf and logpdf.
"""
def __init__(self, seed=None):
super(dirichlet_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)
def __call__(self, alpha, seed=None):
return dirichlet_frozen(alpha, seed=seed)
def _logpdf(self, x, alpha):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
%(_dirichlet_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
lnB = _lnB(alpha)
return - lnB + np.sum((np.log(x.T) * (alpha - 1)).T, 0)
def logpdf(self, x, alpha):
"""
Log of the Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = self._logpdf(x, alpha)
return _squeeze_output(out)
def pdf(self, x, alpha):
"""
The Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray
The probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = np.exp(self._logpdf(x, alpha))
return _squeeze_output(out)
def mean(self, alpha):
"""
Compute the mean of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
mu : scalar
Mean of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
out = alpha / (np.sum(alpha))
return _squeeze_output(out)
def var(self, alpha):
"""
Compute the variance of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
v : scalar
Variance of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
return out
def entropy(self, alpha):
"""
Compute the differential entropy of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
lnB = _lnB(alpha)
K = alpha.shape[0]
out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
(alpha - 1) * scipy.special.psi(alpha))
return _squeeze_output(out)
def rvs(self, alpha, size=1, random_state=None):
"""
Draw random samples from a Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
size : int, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
"""
alpha = _dirichlet_check_parameters(alpha)
random_state = self._get_random_state(random_state)
return random_state.dirichlet(alpha, size=size)
dirichlet = dirichlet_gen()
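def _dirichlet_orientation_demo():
    """A minimal illustrative sketch (not part of the original module) of the
    interface inconsistency noted in the dirichlet docstring: rvs returns one
    sample per row, while pdf/logpdf expect the components along the first
    axis, so the samples have to be transposed before evaluation.
    """
    alpha = np.array([1., 2., 3.])
    samples = dirichlet.rvs(alpha, size=5)  # shape (5, 3)
    densities = dirichlet.pdf(samples.T, alpha)  # note the transpose
    return samples, densities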
class dirichlet_frozen(multi_rv_frozen):
def __init__(self, alpha, seed=None):
self.alpha = _dirichlet_check_parameters(alpha)
self._dist = dirichlet_gen(seed)
def logpdf(self, x):
return self._dist.logpdf(x, self.alpha)
def pdf(self, x):
return self._dist.pdf(x, self.alpha)
def mean(self):
return self._dist.mean(self.alpha)
def var(self):
return self._dist.var(self.alpha)
def entropy(self):
return self._dist.entropy(self.alpha)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
method = dirichlet_gen.__dict__[name]
method_frozen = dirichlet_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, dirichlet_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
_wishart_doc_default_callparams = """\
df : int
Degrees of freedom, must be greater than or equal to dimension of the
scale matrix
scale : array_like
Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
wishart_docdict_params = {
'_doc_default_callparams': _wishart_doc_default_callparams,
'_doc_callparams_note': _wishart_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
'_doc_default_callparams': _wishart_doc_frozen_callparams,
'_doc_callparams_note': _wishart_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
r"""
A Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal precision matrix (the inverse of the covariance
matrix).
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from a Wishart distribution.
``entropy()``
Compute the differential entropy of the Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Wishart random
variable:
rv = wishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
invwishart, chi2
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The Wishart distribution is often denoted
.. math::
W_p(\nu, \Sigma)
where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
:math:`p \times p` scale matrix.
The probability density function for `wishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
its PDF is given by:
.. math::
f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
|\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
\exp\left( -tr(\Sigma^{-1} S) / 2 \right)
If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
:math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
If the scale matrix is 1-dimensional and equal to one, then the Wishart
distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
distribution.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import wishart, chi2
>>> x = np.linspace(1e-5, 8, 100)
>>> w = wishart.pdf(x, df=3, scale=1); w[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> c = chi2.pdf(x, 3); c[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> plt.plot(x, w)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(wishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen Wishart distribution.
See `wishart_frozen` for more information.
"""
return wishart_frozen(df, scale, seed)
def _process_parameters(self, df, scale):
if scale is None:
scale = 1.0
scale = np.asarray(scale, dtype=float)
if scale.ndim == 0:
scale = scale[np.newaxis,np.newaxis]
elif scale.ndim == 1:
scale = np.diag(scale)
elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
raise ValueError("Array 'scale' must be square if it is two"
" dimensional, but scale.scale = %s."
% str(scale.shape))
elif scale.ndim > 2:
raise ValueError("Array 'scale' must be at most two-dimensional,"
" but scale.ndim = %d" % scale.ndim)
dim = scale.shape[0]
if df is None:
df = dim
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df < dim:
raise ValueError("Degrees of freedom cannot be less than dimension"
" of scale matrix, but df = %d" % df)
return dim, df, scale
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x * np.eye(dim)[:, :, np.newaxis]
if x.ndim == 1:
if dim == 1:
x = x[np.newaxis, np.newaxis, :]
else:
x = np.diag(x)[:, :, np.newaxis]
elif x.ndim == 2:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square if they are two"
" dimensional, but x.shape = %s."
% str(x.shape))
x = x[:, :, np.newaxis]
elif x.ndim == 3:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square in the first two"
" dimensions if they are three dimensional"
", but x.shape = %s." % str(x.shape))
elif x.ndim > 3:
raise ValueError("Quantiles must be at most two-dimensional with"
" an additional dimension for multiple"
"components, but x.ndim = %d" % x.ndim)
# Now we have 3-dim array; should have shape [dim, dim, *]
if not x.shape[0:2] == (dim, dim):
raise ValueError('Quantiles have incompatible dimensions: should'
' be %s, got %s.' % ((dim, dim), x.shape[0:2]))
return x
def _process_size(self, size):
size = np.asarray(size)
if size.ndim == 0:
size = size[np.newaxis]
elif size.ndim > 1:
raise ValueError('Size must be an integer or tuple of integers;'
' thus must have dimension <= 1.'
                             ' Got size = %s' % str(tuple(size)))
n = size.prod()
shape = tuple(size)
return n, shape
def _logpdf(self, x, dim, df, scale, log_det_scale, C):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
# log determinant of x
# Note: x has components along the last axis, so that x.T has
        # components along the 0-th axis. Then since det(A) = det(A'), this
# gives us a 1-dim vector of determinants
# Retrieve tr(scale^{-1} x)
log_det_x = np.zeros(x.shape[-1])
scale_inv_x = np.zeros(x.shape)
tr_scale_inv_x = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
_, log_det_x[i] = self._cholesky_logdet(x[:,:,i])
scale_inv_x[:,:,i] = scipy.linalg.cho_solve((C, True), x[:,:,i])
tr_scale_inv_x[i] = scale_inv_x[:,:,i].trace()
# Log PDF
out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
(0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
multigammaln(0.5*df, dim)))
return out
def logpdf(self, x, df, scale):
"""
Log of the Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
# Cholesky decomposition of scale, get log(det(scale))
C, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale, C)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
return df * scale
def mean(self, df, scale):
"""
Mean of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out)
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
if df >= dim + 1:
out = (df-dim-1) * scale
else:
out = None
return out
def mode(self, df, scale):
"""
Mode of the Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float or None
            The mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
var = scale**2
diag = scale.diagonal() # 1 x dim array
var += np.outer(diag, diag)
var *= df
return var
def var(self, df, scale):
"""
Variance of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out)
def _standard_rvs(self, n, shape, dim, df, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
random_state : np.random.RandomState instance
RandomState used for drawing the random variates.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
# Random normal variates for off-diagonal elements
n_tril = dim * (dim-1) // 2
covariances = random_state.normal(
size=n*n_tril).reshape(shape+(n_tril,))
# Random chi-square variates for diagonal elements
variances = np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
for i in range(dim)]].reshape((dim,) + shape[::-1]).T
# Create the A matri(ces) - lower triangular
A = np.zeros(shape + (dim, dim))
# Input the covariances
size_idx = tuple([slice(None,None,None)]*len(shape))
tril_idx = np.tril_indices(dim, k=-1)
A[size_idx + tril_idx] = covariances
# Input the variances
diag_idx = np.diag_indices(dim)
A[size_idx + diag_idx] = variances
return A
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Calculate the matrices A, which are actually lower triangular
# Cholesky factorizations of a matrix B such that B ~ W(df, I)
A = self._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = C A A' C', where SA ~ W(df, scale)
# Note: this is the product of a (lower) (lower) (lower)' (lower)'
# or, denoting B = AA', it is C B C' where C is the lower
# triangular Cholesky factorization of the scale matrix.
# this appears to conflict with the instructions in [1]_, which
# suggest that it should be D' B D where D is the lower
# triangular factorization of the scale matrix. However, it is
# meant to refer to the Bartlett (1933) representation of a
# Wishart random variate as L A A' L' where L is lower triangular
# so it appears that understanding D' to be upper triangular
# is either a typo in or misreading of [1]_.
for index in np.ndindex(shape):
CA = np.dot(C, A[index])
A[index] = np.dot(CA, CA.T)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from a Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Cholesky decomposition of scale
C = scipy.linalg.cholesky(scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def _entropy(self, dim, df, log_det_scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'entropy' instead.
"""
return (
0.5 * (dim+1) * log_det_scale +
0.5 * dim * (dim+1) * _LOG_2 +
multigammaln(0.5*df, dim) -
0.5 * (df - dim - 1) * np.sum(
[psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
) +
0.5 * df * dim
)
def entropy(self, df, scale):
"""
Compute the differential entropy of the Wishart.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Wishart distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
_, log_det_scale = self._cholesky_logdet(scale)
return self._entropy(dim, df, log_det_scale)
def _cholesky_logdet(self, scale):
"""
        Compute the Cholesky decomposition and determine log(det(scale)).
Parameters
----------
scale : ndarray
Scale matrix.
Returns
-------
c_decomp : ndarray
The Cholesky decomposition of `scale`.
logdet : scalar
The log of the determinant of `scale`.
Notes
-----
This computation of ``logdet`` is equivalent to
``np.linalg.slogdet(scale)``. It is ~2x faster though.
"""
c_decomp = scipy.linalg.cholesky(scale, lower=True)
logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
return c_decomp, logdet
wishart = wishart_gen()
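def _wishart_bartlett_demo(df=5, seed=1234):
    """An illustrative sketch (not part of the original module): build a
    single Wishart(df, scale) draw by hand with the Bartlett construction
    used in wishart_gen._rvs above, i.e. S = C A A' C' where C is the lower
    Cholesky factor of the scale matrix and A is lower triangular with
    chi-distributed diagonal and standard normal off-diagonal entries.
    """
    rng = np.random.RandomState(seed)
    scale = np.array([[2., 0.3], [0.3, 0.5]])
    dim = scale.shape[0]
    C = scipy.linalg.cholesky(scale, lower=True)
    A = np.zeros((dim, dim))
    A[np.diag_indices(dim)] = [np.sqrt(rng.chisquare(df - i))
                               for i in range(dim)]
    A[np.tril_indices(dim, k=-1)] = rng.normal(size=dim*(dim-1)//2)
    CA = np.dot(C, A)
    return np.dot(CA, CA.T)  # one draw from W_2(df, scale)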
class wishart_frozen(multi_rv_frozen):
"""
Create a frozen Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
def __init__(self, df, scale, seed=None):
self._dist = wishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale)
self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale, self.C)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out)
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
method = wishart_gen.__dict__[name]
method_frozen = wishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
from numpy import asarray_chkfinite, asarray
from scipy.linalg.misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
def _cho_inv_batch(a, check_finite=True):
"""
Invert the matrices a_i, using a Cholesky factorization of A, where
a_i resides in the last two dimensions of a and the other indices describe
the index i.
Overwrites the data in a.
Parameters
----------
a : array
Array of matrices to invert, where the matrices themselves are stored
in the last two dimensions.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : array
Array of inverses of the matrices ``a_i``.
See also
--------
scipy.linalg.cholesky : Cholesky factorization of a matrix
"""
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
raise ValueError('expected square matrix in last two dimensions')
potrf, potri = get_lapack_funcs(('potrf','potri'), (a1,))
tril_idx = np.tril_indices(a.shape[-2], k=-1)
triu_idx = np.triu_indices(a.shape[-2], k=1)
for index in np.ndindex(a1.shape[:-2]):
# Cholesky decomposition
a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
clean=False)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite"
% info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Inversion
a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
if info > 0:
raise LinAlgError("the inverse could not be computed")
if info < 0:
            raise ValueError('illegal value in %d-th argument of internal'
                             ' potri' % -info)
# Make symmetric (dpotri only fills in the lower triangle)
a1[index][triu_idx] = a1[index][tril_idx]
return a1
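# Rough self-contained check of the batched inversion above: for a stack of
# well-conditioned symmetric positive definite matrices the result should
# agree with ``np.linalg.inv`` applied slice by slice.  The random stack and
# the shift used to make it SPD are arbitrary; this helper is not used by
# the library code.
def _cho_inv_batch_sketch():
    rng = np.random.RandomState(0)
    m = rng.randn(4, 3, 3)
    spd = np.einsum('nij,nkj->nik', m, m) + 3 * np.eye(3)  # each slice is SPD
    inv = _cho_inv_batch(spd.copy())  # copy: the routine overwrites its input
    reference = np.array([np.linalg.inv(s) for s in spd])
    return np.allclose(inv, reference)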
class invwishart_gen(wishart_gen):
r"""
An inverse Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal covariance matrix.
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from an inverse Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" inverse Wishart
random variable:
rv = invwishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
wishart
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The inverse Wishart distribution is often denoted
.. math::
W_p^{-1}(\nu, \Psi)
where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
:math:`p \times p` scale matrix.
The probability density function for `invwishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
then its PDF is given by:
.. math::
f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
|S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
\exp\left( -tr(\Sigma S^{-1}) / 2 \right)
If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
:math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).
If the scale matrix is 1-dimensional and equal to one, then the inverse
    Wishart distribution :math:`W_1^{-1}(\nu, 1)` collapses to the
inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
and scale = :math:`\frac{1}{2}`.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications in
Statistics - Simulation and Computation, vol. 14.2, pp.511-514, 1985.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import invwishart, invgamma
>>> x = np.linspace(0.01, 1, 100)
>>> iw = invwishart.pdf(x, df=6, scale=1)
>>> iw[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> ig = invgamma.pdf(x, 6/2., scale=1./2)
>>> ig[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> plt.plot(x, iw)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(invwishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen inverse Wishart distribution.
See `invwishart_frozen` for more information.
"""
return invwishart_frozen(df, scale, seed)
def _logpdf(self, x, dim, df, scale, log_det_scale):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function.
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
log_det_x = np.zeros(x.shape[-1])
#scale_x_inv = np.zeros(x.shape)
x_inv = np.copy(x).T
if dim > 1:
_cho_inv_batch(x_inv) # works in-place
else:
x_inv = 1./x_inv
tr_scale_x_inv = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
C, lower = scipy.linalg.cho_factor(x[:,:,i], lower=True)
log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))
#scale_x_inv[:,:,i] = scipy.linalg.cho_solve((C, True), scale).T
tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()
# Log PDF
out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
(0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
multigammaln(0.5*df, dim))
return out
def logpdf(self, x, df, scale):
"""
Log of the inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
        logpdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
_, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
if df > dim + 1:
out = scale / (df - dim - 1)
else:
out = None
return out
def mean(self, df, scale):
"""
Mean of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus one.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float or None
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
return scale / (df + dim + 1)
def mode(self, df, scale):
"""
Mode of the inverse Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out)
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
if df > dim + 3:
var = (df - dim + 1) * scale**2
diag = scale.diagonal() # 1 x dim array
var += (df - dim - 1) * np.outer(diag, diag)
var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
else:
var = None
return var
def var(self, df, scale):
"""
Variance of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus three.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Get random draws A such that A ~ W(df, I)
A = super(invwishart_gen, self)._standard_rvs(n, shape, dim,
df, random_state)
# Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
eye = np.eye(dim)
trtrs = get_lapack_funcs(('trtrs'), (A,))
for index in np.ndindex(A.shape[:-2]):
# Calculate CA
CA = np.dot(C, A[index])
# Get (C A)^{-1} via triangular solver
if dim > 1:
CA, info = trtrs(CA, eye, lower=True)
if info > 0:
raise LinAlgError("Singular matrix.")
if info < 0:
raise ValueError('Illegal value in %d-th argument of'
' internal trtrs' % -info)
else:
CA = 1. / CA
# Get SA
A[index] = np.dot(CA.T, CA)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from an inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Invert the scale
eye = np.eye(dim)
L, lower = scipy.linalg.cho_factor(scale, lower=True)
inv_scale = scipy.linalg.cho_solve((L, lower), eye)
# Cholesky decomposition of inverted scale
C = scipy.linalg.cholesky(inv_scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
invwishart = invwishart_gen()
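# A small sketch tying ``invwishart`` back to ``wishart`` as stated in the
# class docstring: if S ~ W_p^{-1}(df, scale) then S^{-1} ~ W_p(df, scale^{-1}).
# The parameter values are arbitrary and the helper is never called here; the
# two log-densities below are related but not equal (they differ by the
# Jacobian of the matrix inverse).
def _invwishart_usage_sketch():
    df, scale = 6, np.diag([2.0, 1.0])
    frozen = invwishart(df=df, scale=scale)
    draw = frozen.rvs(size=1, random_state=np.random.RandomState(1))
    logp_inverse_wishart = invwishart.logpdf(draw, df, scale)
    logp_wishart = wishart.logpdf(np.linalg.inv(draw), df, np.linalg.inv(scale))
    return draw.shape, logp_inverse_wishart, logp_wishart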
class invwishart_frozen(multi_rv_frozen):
def __init__(self, df, scale, seed=None):
"""
Create a frozen inverse Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
self._dist = invwishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale
)
# Get the determinant via Cholesky factorization
C, lower = scipy.linalg.cho_factor(self.scale, lower=True)
self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))
# Get the inverse using the Cholesky factorization
eye = np.eye(self.dim)
self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)
# Get the Cholesky factorization of the inverse scale
self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out)
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
method = invwishart_gen.__dict__[name]
    method_frozen = invwishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
| bsd-3-clause |
DistributedML/TorML | ML_experimental/code/logistic_main.py | 1 | 8987 | from __future__ import division
import utils
import logistic_model
import global_model
import pdb
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
import numpy as np
# Load Binary and Multi -class data
data = utils.load_dataset("creditcard")
XBin, yBin = data['X'], data['y']
XBinValid, yBinValid = data['Xvalid'], data['yvalid']
#Shuffle data
(n,d) = XBin.shape
#all_data = np.hstack((XBin, yBin))
#np.random.shuffle(all_data)
#all_data_valid = np.hstack((XBinValid, yBinValid))
#XBin,yBin = np.hsplit(all_data,d)
#XBinValid, yBinValid = np.hsplit(all_data_valid,d)
cut1 = int(XBin.shape[0] * 0.2)
cut2 = int(XBin.shape[0] * 0.4)
cut3 = int(XBin.shape[0] * 0.6)
cut4 = int(XBin.shape[0] * 0.8)
cut5 = XBin.shape[0]
cutVal = int(XBinValid.shape[0] * 0.5)
cutVal2 = XBinValid.shape[0]
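# The hard-coded cut points above carve the training rows into five roughly
# equal, contiguous shards, one per simulated data holder.  A generic helper
# expressing the same idea is sketched here for clarity only; the script below
# keeps using the explicit cut variables (note it starts each later shard at
# cut + 1, so one row per boundary is skipped).
def shard_bounds(n_rows, n_shards=5):
    edges = [int(n_rows * i / float(n_shards)) for i in range(n_shards + 1)]
    return list(zip(edges[:-1], edges[1:]))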
if __name__ == "__main__":
model1 = logistic_model.logRegL2(XBin[0:cut1, :], yBin[0:cut1],
lammy=0.1, verbose=0, maxEvals=400)
model1.fit()
model2 = logistic_model.logRegL2(XBin[cut1 + 1:cut2, :], yBin[cut1 + 1:cut2],
lammy=0.1, verbose=0, maxEvals=400)
model2.fit()
model3 = logistic_model.logRegL2(XBin[cut2 + 1:cut3, :], yBin[cut2 + 1:cut3],
lammy=0.1, verbose=0, maxEvals=400)
model3.fit()
model4 = logistic_model.logRegL2(XBin[cut3 + 1:cut4, :], yBin[cut3 + 1:cut4],
lammy=0.1, verbose=0, maxEvals=400)
model4.fit()
model5 = logistic_model.logRegL2(XBin[cut4 + 1:cut5, :], yBin[cut4 + 1:cut5],
lammy=0.1, verbose=0, maxEvals=400)
model5.fit()
print("model1 Validation error %.3f" %
utils.classification_error(model1.predict(XBinValid), yBinValid))
print("model2 Validation error %.3f" %
utils.classification_error(model2.predict(XBinValid), yBinValid))
print("model3 Validation error %.3f" %
utils.classification_error(model3.predict(XBinValid), yBinValid))
print("model4 Validation error %.3f" %
utils.classification_error(model4.predict(XBinValid), yBinValid))
print("model5 Validation error %.3f" %
utils.classification_error(model5.predict(XBinValid), yBinValid))
clf = SGDClassifier(loss="hinge", penalty="l2")
clf.fit(XBin, yBin)
print("sklearn sgd validation error %.3f" %
utils.classification_error(clf.predict(XBinValid), yBinValid))
svmclf = LinearSVC()
svmclf.fit(XBin, yBin)
print("sklearn SVM validation error %.3f" %
utils.classification_error(svmclf.predict(XBinValid), yBinValid))
# GLOBAL MODEL
global_model_gd = global_model.globalModel(
logistic=True, verbose=0, maxEvals=500)
global_model_gd.add_model(model1)
global_model_gd.add_model(model2)
global_model_gd.add_model(model3)
global_model_gd.add_model(model4)
global_model_gd.add_model(model5)
global_model_gd.fit(theta=1)
print("global 1 GD Training error %.3f" %
utils.classification_error(global_model_gd.predict(XBin), yBin))
print("global 1 GD Validation error %.3f" %
utils.classification_error(global_model_gd.predict(XBinValid), yBinValid))
# GLOBAL MODEL with SGD
global_model_sgd = global_model.globalModel(
logistic=True, verbose=0, maxEvals=100000)
global_model_sgd.add_model(model1)
global_model_sgd.add_model(model2)
global_model_sgd.add_model(model3)
global_model_sgd.add_model(model4)
global_model_sgd.add_model(model5)
training_batch_size = 5
print("STOCHASTIC BS is %.0f" % training_batch_size)
global_model_sgd.fit(theta=1, batch_size=training_batch_size)
print("global 1 SGD Training error %.3f" %
utils.classification_error(global_model_sgd.predict(XBin), yBin))
print("global 1 SGD Validation error %.3f" %
utils.classification_error(global_model_sgd.predict(XBinValid), yBinValid))
# GLOBAL MODEL with private SGD
global_model_sgd = global_model.globalModel(logistic=True, verbose=0, maxEvals=100000)
global_model_sgd.add_model(model1)
global_model_sgd.add_model(model2)
global_model_sgd.add_model(model3)
global_model_sgd.add_model(model4)
global_model_sgd.add_model(model5)
training_batch_size = 5
print("STOCHASTIC BS is %.0f" % training_batch_size)
global_model_sgd.sgd_fit_private(alpha=1, eta=0.01, batch_size=training_batch_size)
print("global 1 SGD Training error %.3f" %
utils.classification_error(global_model_sgd.predict(XBin), yBin))
print("global 1 SGD Validation error %.3f" %
utils.classification_error(global_model_sgd.predict(XBinValid), yBinValid))
# GLOBAL MODEL with PEGASOS
global_model_pegasos = global_model.globalModelSVM(
logistic=True, verbose=0, maxEvals=100000)
global_model_pegasos.add_model(model1)
global_model_pegasos.add_model(model2)
global_model_pegasos.add_model(model3)
global_model_pegasos.add_model(model4)
global_model_pegasos.add_model(model5)
global_model_pegasos.fit(batch_size=training_batch_size)
print("global SVM Training error %.3f" %
utils.classification_error(global_model_pegasos.predict(XBin), yBin))
print("global SVM Validation error %.3f" %
utils.classification_error(global_model_pegasos.predict(XBinValid), yBinValid))
# FULL
sk_full = logistic_model.logRegL2(XBin, yBin,
lammy=0.1, verbose=0, maxEvals=100000)
sk_full.sgd_fit(theta=1, batch_size=training_batch_size)
print("full Training error %.3f" %
utils.classification_error(sk_full.predict(XBin), yBin))
print("full Validation error %.3f" %
utils.classification_error(sk_full.predict(XBinValid), yBinValid))
# RAW AVERAGE
print("----------------------------------------------")
print("global-averaging e=0.1 Validation error %.3f" %
utils.classification_error(global_model_gd.predictAverage(
XBinValid, epsilon=0.1), yBinValid))
print("global-averaging e=0.01 Validation error %.3f" %
utils.classification_error(global_model_gd.predictAverage(
XBinValid, epsilon=0.01), yBinValid))
print("global-averaging e=0.001 Validation error %.3f" %
utils.classification_error(global_model_gd.predictAverage(
XBinValid, epsilon=0.001), yBinValid))
# WEIGHTED AVERAGE on public labelled
global_model_gd.fitWeightedAverage(
XBinValid[0:cutVal, :], yBinValid[0:cutVal], epsilon=0.1)
print("global-weighted e=0.1 Validation error %.3f" %
utils.classification_error(global_model_gd.predictWeightedAverage(
XBinValid[cutVal + 1:cutVal2, :]), yBinValid[cutVal + 1:cutVal2]))
# WEIGHTED AVERAGE on public labelled
global_model_gd.fitWeightedAverage(
XBinValid[0:cutVal, :], yBinValid[0:cutVal], epsilon=0.01)
print("global-weighted e=0.01 Validation error %.3f" %
utils.classification_error(global_model_gd.predictWeightedAverage(
XBinValid[cutVal + 1:cutVal2, :]), yBinValid[cutVal + 1:cutVal2]))
# WEIGHTED AVERAGE on public labelled
global_model_gd.fitWeightedAverage(
XBinValid[0:cutVal, :], yBinValid[0:cutVal], epsilon=0.001)
print("global-weighted e=0.001 Validation error %.3f" %
utils.classification_error(global_model_gd.predictWeightedAverage(
XBinValid[cutVal + 1:cutVal2, :]), yBinValid[cutVal + 1:cutVal2]))
'''
### KNOWLEDGE TRANSFER on public unlabelled
ypub = global_model_gd.predictAverage(XBinValid[0:cutVal,:], epsilon=0.1)
global_kt = logistic_model.logRegL2(XBinValid[0:cutVal,:], ypub, lammy=0.1, verbose=0, maxEvals=400)
global_kt.fit()
print("global-knowledge-transfer e=0.1 Validation error %.3f" %
utils.classification_error(global_kt.predict(
XBinValid[cutVal+1:cutVal2,:]), yBinValid[cutVal+1:cutVal2]))
### KNOWLEDGE TRANSFER on public unlabelled
ypub = global_model_gd.predictAverage(XBinValid[0:cutVal,:], epsilon=0.01)
global_kt = logistic_model.logRegL2(XBinValid[0:cutVal,:], ypub, lammy=0.1, verbose=0, maxEvals=400)
global_kt.fit()
print("global-knowledge-transfer e=0.01 Validation error %.3f" %
utils.classification_error(global_kt.predict(
XBinValid[cutVal+1:cutVal2,:]), yBinValid[cutVal+1:cutVal2]))
### KNOWLEDGE TRANSFER on public unlabelled
ypub = global_model_gd.predictAverage(XBinValid[0:cutVal,:], epsilon=0.001)
global_kt = logistic_model.logRegL2(XBinValid[0:cutVal,:], ypub, lammy=0.1, verbose=0, maxEvals=400)
global_kt.fit()
print("global-knowledge-transfer e=0.001 Validation error %.3f" %
utils.classification_error(global_kt.predict(
XBinValid[cutVal+1:cutVal2,:]), yBinValid[cutVal+1:cutVal2]))
'''
| mit |
ilo10/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create instances of the models and fit our data. We do not scale the
# data since we want to plot the points on the original feature axes
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
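# As a rough quantitative complement to the decision-boundary plots, one way
# to compare the fitted models is to check how often each semi-supervised
# estimator recovers the labels that were hidden (set to -1) before fitting.
# Only objects created above are used; the fully labeled and SVC cases are
# skipped because nothing was hidden from them.
for title, (model, y_train) in zip(titles[:3], (ls30, ls50, ls100)):
    hidden = y_train == -1
    if hidden.any():
        accuracy = np.mean(model.predict(X[hidden]) == y[hidden])
        print("%s: accuracy on hidden points = %.2f" % (title, accuracy))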
| bsd-3-clause |
pypot/scikit-learn | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
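# Optional convenience sketch: flatten the ``results`` mapping produced above
# into (method, n_samples, n_features, seconds) rows, e.g. for CSV export.
# The row order mirrors the nested loops in ``compute_bench`` (samples outer,
# features inner); the benchmark below does not call this helper.
def results_to_rows(results, samples_range, features_range):
    grid = [(ns, nf) for ns in samples_range for nf in features_range]
    rows = []
    for method, timings in sorted(results.items()):
        for (n_samples, n_features), seconds in zip(grid, timings):
            rows.append((method, n_samples, n_features, seconds))
    return rows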
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
francesco-mannella/dmp-esn | parametric/parametric_dmp/bin/tr_datasets/e_cursive_curves_angles_start_none/trajectories/plot.py | 18 | 1043 | #!/usr/bin/env python
import glob
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
pathname = os.path.dirname(sys.argv[0])
if pathname:
os.chdir(pathname)
n_dim = None
trains = []
for fname in glob.glob("tl*"):
t = np.loadtxt(fname)
trains.append(t)
tests = []
for fname in glob.glob("tt*"):
t = np.loadtxt(fname)
tests.append(t)
trial_results= []
for fname in glob.glob("rtl*"):
t = np.loadtxt(fname)
trial_results.append(t)
test_results= []
for fname in glob.glob("rtt*"):
t = np.loadtxt(fname)
test_results.append(t)
fig = plt.figure()
ax = fig.add_subplot(111, aspect="equal")
for d in trains:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color="blue", lw=3, alpha=0.5)
for d in tests:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color="red", lw=3, alpha=0.5)
for d in trial_results:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color=[0,0,.5], lw=2)
for d in test_results:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color=[.5,0,0], lw=2)
plt.show()
| gpl-2.0 |
mjgrav2001/scikit-learn | sklearn/tests/test_dummy.py | 129 | 17774 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
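# Tiny illustration of what the helper above asserts, using one of the dummy
# strategies exercised later in this file.  The name deliberately does not
# start with ``test_`` so it is not collected as a test case.
def _predict_proba_helper_sketch():
    X, y = [[0]] * 4, [1, 2, 1, 1]
    clf = DummyClassifier(strategy="most_frequent").fit(X, y)
    _check_predict_proba(clf, X, y)  # rows of predict_proba sum to 1, log agrees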
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba(X[0]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba(X[0]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategey_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
| bsd-3-clause |
Featuretools/featuretools | featuretools/utils/wrangle.py | 1 | 5744 | import re
from datetime import datetime
import numpy as np
import pandas as pd
from featuretools import variable_types
from featuretools.entityset.timedelta import Timedelta
def _check_timedelta(td):
"""
Convert strings to Timedelta objects
Allows for both shortform and longform units, as well as any form of capitalization
'2 Minutes'
'2 minutes'
'2 m'
'1 Minute'
'1 minute'
'1 m'
'1 units'
'1 Units'
'1 u'
Shortform is fine if space is dropped
'2m'
    '1u'
If a pd.Timedelta object is passed, units will be converted to seconds due to the underlying representation
of pd.Timedelta.
If a pd.DateOffset object is passed, it will be converted to a Featuretools Timedelta if it has one
temporal parameter. Otherwise, it will remain a pd.DateOffset.
"""
if td is None:
return td
if isinstance(td, Timedelta):
return td
elif not isinstance(td, (int, float, str, pd.DateOffset, pd.Timedelta)):
raise ValueError("Unable to parse timedelta: {}".format(td))
if isinstance(td, pd.Timedelta):
unit = 's'
value = td.total_seconds()
times = {unit: value}
return Timedelta(times, delta_obj=td)
elif isinstance(td, pd.DateOffset):
# DateOffsets
if td.__class__.__name__ == "DateOffset":
times = dict()
for td_unit, td_value in td.kwds.items():
times[td_unit] = td_value
return Timedelta(times, delta_obj=td)
# Special offsets (such as BDay)
else:
unit = td.__class__.__name__
value = td.__dict__['n']
times = dict([(unit, value)])
return Timedelta(times, delta_obj=td)
else:
pattern = '([0-9]+) *([a-zA-Z]+)$'
match = re.match(pattern, td)
value, unit = match.groups()
try:
value = int(value)
except Exception:
try:
value = float(value)
except Exception:
raise ValueError("Unable to parse value {} from ".format(value) +
"timedelta string: {}".format(td))
times = {unit: value}
return Timedelta(times)
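# Small illustration of the parsing behaviour documented above, reusing the
# example strings from the docstring (the unit spellings are assumed to be
# ones ``Timedelta`` accepts).  Not called by the library.
def _check_timedelta_examples():
    from_strings = [_check_timedelta(s) for s in ("2 Minutes", "2m", "1 u")]
    from_pandas = _check_timedelta(pd.Timedelta("90 seconds"))  # stored as 90.0 seconds
    return from_strings, from_pandas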
def _check_time_against_column(time, time_column):
'''
Check to make sure that time is compatible with time_column,
where time could be a timestamp, or a Timedelta, number, or None,
and time_column is a Variable. Compatibility means that
    arithmetic can be performed between time and elements of time_column
If time is None, then we don't care if arithmetic can be performed
(presumably it won't ever be performed)
'''
if time is None:
return True
elif isinstance(time, (int, float)):
return isinstance(time_column,
variable_types.Numeric)
elif isinstance(time, (pd.Timestamp, datetime, pd.DateOffset)):
return isinstance(time_column,
variable_types.Datetime)
elif isinstance(time, Timedelta):
return (isinstance(time_column, (variable_types.Datetime, variable_types.DatetimeTimeIndex)) or
(isinstance(time_column, (variable_types.Ordinal, variable_types.Numeric, variable_types.TimeIndex)) and
time.unit not in Timedelta._time_units))
else:
return False
def _check_time_type(time):
'''
Checks if `time` is an instance of common int, float, or datetime types.
Returns "numeric", "datetime", or "unknown" based on results
'''
time_type = None
if isinstance(time, (datetime, np.datetime64)):
time_type = variable_types.DatetimeTimeIndex
elif isinstance(time, (int, float)) or np.issubdtype(time, np.integer) or np.issubdtype(time, np.floating):
time_type = variable_types.NumericTimeIndex
return time_type
def _dataframes_equal(df1, df2):
# ^ means XOR
if df1.empty ^ df2.empty:
return False
elif not df1.empty and not df2.empty:
if not set(df1.columns) == set(df2.columns):
return False
for c in df1:
df1c = df1[c]
df2c = df2[c]
if df1c.dtype == object:
df1c = df1c.astype('unicode')
if df2c.dtype == object:
df2c = df2c.astype('unicode')
normal_compare = True
if df1c.dtype == object:
dropped = df1c.dropna()
if not dropped.empty:
if isinstance(dropped.iloc[0], tuple):
dropped2 = df2[c].dropna()
normal_compare = False
for i in range(len(dropped.iloc[0])):
try:
equal = dropped.apply(lambda x: x[i]).equals(
dropped2.apply(lambda x: x[i]))
except IndexError:
raise IndexError("If column data are tuples, they must all be the same length")
if not equal:
return False
if normal_compare:
# handle nan equality correctly
# This way is much faster than df1.equals(df2)
result = df1c == df2c
result[pd.isnull(df1c) == pd.isnull(df2c)] = True
if not result.all():
return False
return True
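# Quick sketch of the NaN-aware comparison above on a toy pair of frames;
# NaNs in matching positions count as equal, so this returns True.  Purely
# illustrative and unused elsewhere in the module.
def _dataframes_equal_sketch():
    left = pd.DataFrame({"x": [1.0, np.nan], "y": [3, 4]})
    right = pd.DataFrame({"x": [1.0, np.nan], "y": [3, 4]})
    return _dataframes_equal(left, right)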
def _is_s3(string):
'''
Checks if the given string is a s3 path.
Returns a boolean.
'''
return "s3://" in string
def _is_url(string):
'''
Checks if the given string is an url path.
Returns a boolean.
'''
return 'http' in string
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.17/_downloads/658fee847402f7a2d3fee71f95131da3/plot_label_activation_from_stc.py | 62 | 1949 | """
==================================================
Extracting time course from source_estimate object
==================================================
Load a SourceEstimate object from stc files and
extract the time course of activation in
individual labels, as well as in a complex label
formed through merging two labels.
"""
# Author: Christian Brodbeck <[email protected]>
#
# License: BSD (3-clause)
import os
import mne
from mne.datasets import sample
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
os.environ['SUBJECTS_DIR'] = data_path + '/subjects'
meg_path = data_path + '/MEG/sample'
# load the stc
stc = mne.read_source_estimate(meg_path + '/sample_audvis-meg')
# load the labels
aud_lh = mne.read_label(meg_path + '/labels/Aud-lh.label')
aud_rh = mne.read_label(meg_path + '/labels/Aud-rh.label')
# extract the time course for different labels from the stc
stc_lh = stc.in_label(aud_lh)
stc_rh = stc.in_label(aud_rh)
stc_bh = stc.in_label(aud_lh + aud_rh)
# calculate center of mass and transform to mni coordinates
vtx, _, t_lh = stc_lh.center_of_mass('sample')
mni_lh = mne.vertex_to_mni(vtx, 0, 'sample')[0]
vtx, _, t_rh = stc_rh.center_of_mass('sample')
mni_rh = mne.vertex_to_mni(vtx, 1, 'sample')[0]
# plot the activation
plt.figure()
plt.axes([.1, .275, .85, .625])
hl = plt.plot(stc.times, stc_lh.data.mean(0), 'b')[0]
hr = plt.plot(stc.times, stc_rh.data.mean(0), 'g')[0]
hb = plt.plot(stc.times, stc_bh.data.mean(0), 'r')[0]
plt.xlabel('Time (s)')
plt.ylabel('Source amplitude (dSPM)')
plt.xlim(stc.times[0], stc.times[-1])
# add a legend including center-of-mass mni coordinates to the plot
labels = ['LH: center of mass = %s' % mni_lh.round(2),
'RH: center of mass = %s' % mni_rh.round(2),
'Combined LH & RH']
plt.figlegend([hl, hr, hb], labels, 'lower center')
plt.suptitle('Average activation in auditory cortex labels', fontsize=20)
plt.show()
| bsd-3-clause |
sanketloke/scikit-learn | sklearn/tests/test_common.py | 27 | 8389 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.decomposition import ProjectedGradientNMF
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance,
check_fit2d_predict1d,
check_fit1d_1sample)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, cloneable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
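# For interactive debugging it can be handy to run a single check directly
# rather than through the yield-based collection above; a minimal example
# (the estimator choice here is arbitrary):
def _run_single_default_constructible_check():
    from sklearn.dummy import DummyClassifier
    check_parameters_default_constructible("DummyClassifier", DummyClassifier)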
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
if issubclass(Estimator, ProjectedGradientNMF):
# The ProjectedGradientNMF class is deprecated
with ignore_warnings():
yield check, name, Estimator
else:
yield check, name, Estimator
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
# Test that all estimators of type which are non-transformer
# and which have an attribute of max_iter, return the attribute
# of n_iter atleast 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
# Multitask models related to ENet cannot handle
# if y is mono-output.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
if issubclass(Estimator, ProjectedGradientNMF):
# The ProjectedGradientNMF class is deprecated
with ignore_warnings():
estimator = Estimator()
else:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
if isinstance(estimator, ProjectedGradientNMF):
# The ProjectedGradientNMF class is deprecated
with ignore_warnings():
yield check_transformer_n_iter, name, estimator
else:
yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False, include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
# The ProjectedGradientNMF class is deprecated
if issubclass(Estimator, ProjectedGradientNMF):
with ignore_warnings():
yield check_get_params_invariance, name, Estimator
else:
yield check_get_params_invariance, name, Estimator
| bsd-3-clause |
HaydenFaulkner/phd | keras_code/rnns/sentence/train.py | 1 | 19333 | import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = dir_path[:dir_path.find('/phd')+4]
if dir_path not in sys.path:
sys.path.append(dir_path)
print(sys.path)
from keras import backend as K
import numpy as np
import random
import matplotlib.pyplot as plt
import time
import datetime
import os.path
import time
from random import randint
from scipy import spatial
import keras_code.rnns.sentence.models as models
import keras_code.rnns.sentence.test as rnn_test
from datasets import vocab_utils
from utilities.logging import print_n_log, refresh_log
import math
import utilities.paths as paths
DRIVE = paths.get_drive()
from evaluation.metrics import score
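# get_meteor_stats runs the model over a whole split, decodes predictions and
# references back into word strings (either from one-hot outputs or by
# nearest-neighbour lookup in the embedding space), and returns the evaluation
# loss together with the METEOR score from evaluation.metrics.score.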
def get_meteor_stats(model_id, model, split, embedding):
bs = 16
dataset = models.get_dataset(model_id, split, batch_size=bs)
vocab_size, words, dictionary = dataset.get_vocab_stats()
ref = {}
hypo = {}
X = None
Y = None
P = None
S = None
num_of_samples = dataset.number_of_samples()
num_of_batches = int(math.floor(num_of_samples / float(bs)))
for batch_count in range(0, num_of_batches):
x, y, sid = dataset.get_batch_xy(True)
if Y is None:
Y = y
X = x
S = sid
else:
Y = np.append(Y, y, axis=0)
X = np.append(X, x, axis=0)
S = np.append(S, sid, axis=0)
loss = model.evaluate(X, Y, batch_size=bs, verbose=1)
    # The same predict call covers both the embedding and one-hot output cases.
    P = model.predict(X, batch_size=bs, verbose=1)
print(loss)
for i in range(len(S)): # i > 500
pred_str = ''
ref_str = ''
        for w in range(np.shape(P[i])[0]):  # loop over words in the predicted sequence
            # check if the output is softmax (one-hot)
            if not embedding:
                word = int(np.argmax(P[i][w]))
                if dictionary[word] == '<EOS>':
                    break
                elif dictionary[word] == '<BOS>' and len(pred_str) == 0:
                    continue
                pred_str += dictionary[word] + ' '
            else:  # is an embedding, so find the nearest vocabulary word by cosine similarity
                closest = float('inf')
                word = P[i][w]
                wordd = ''
                for key, value in words.items():
                    if np.sum(word) > 0 and np.sum(value) > 0:
                        dist = spatial.distance.cosine(np.squeeze(word), np.squeeze(value))
                        if dist < closest:
                            closest = dist
                            wordd = key
                if wordd == '<BOS>':
                    continue
                if wordd == '<EOS>':
                    break
                if wordd:
                    pred_str += wordd + ' '
        for w in range(np.shape(Y[i])[0]):  # loop over words in the reference sequence
            # check if the output is softmax (one-hot)
            if not embedding:
                word = int(np.argmax(Y[i][w]))
                if dictionary[word] == '<EOS>':
                    break
                elif dictionary[word] == '<BOS>' and len(ref_str) == 0:
                    continue
                ref_str += dictionary[word] + ' '
            else:  # is an embedding, so find the nearest vocabulary word by cosine similarity
                closest = float('inf')
                word = Y[i][w]
                wordd = ''
                for key, value in words.items():
                    if np.sum(word) > 0 and np.sum(value) > 0:
                        dist = spatial.distance.cosine(np.squeeze(word), np.squeeze(value))
                        if dist < closest:
                            closest = dist
                            wordd = key
                if wordd == '<BOS>':
                    continue
                if wordd == '<EOS>':
                    break
                if wordd:
                    ref_str += wordd + ' '
hypo[S[i]] = [pred_str]
ref[S[i]] = [ref_str,ref_str]
return loss, score(ref, hypo)["METEOR"]
def train(model_id, identifier, model_path, nb_epoch=200, batch_size=16, load_epoch=None, extra_path=None, embedded=False):
# set the channel order correctly
if K.backend() == 'theano':
K.set_image_dim_ordering('th')
K.set_image_data_format('channels_first')
else:
K.set_image_dim_ordering('tf')
K.set_image_data_format('channels_last')
training_start_time = time.clock()
t_la = [[], []]
t_l = [[], []]
t_a = [[], []]
v_l = [[], []]
v_a = [[], []]
# Load the dataset (train and val)
dataset = models.get_dataset(model_id, 'train', batch_size=batch_size)
dataset_val = models.get_dataset(model_id, 'val', batch_size=batch_size)
vocab_size, words, dictionary = dataset.get_vocab_stats()
model_path = model_path + model_id + '_' + identifier
# Load log
if not os.path.exists(model_path):
os.makedirs(model_path)
log = open(model_path + '/log.txt', "a")
print_n_log(log, '\n\n\nTraining initialised: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()))
# Check load epoch, if not specified or less than 0 get latest
if (load_epoch is None) or (load_epoch < 0):
load_epoch = 0
for i in range(100,-1,-1):
if os.path.isfile(model_path + '/' + model_id + '_' + identifier + '-e' + str(i) + '.h5'):
load_epoch = i
break
if load_epoch == 0:
print_n_log(log, '\nTraining model from scratch...\n\n')
model = models.get_model_from_id(model_id, identifier, batch_size, vocab_size)
else:
print_n_log(log, '\nLoading past model to train from:\n')
print_n_log(log, '\n' + model_path + '/' + model_id + '_' + identifier + '-e' + str(load_epoch) + '.h5\n\n')
        [t_la, t_l, t_a, v_l, v_a] = np.load(model_path + '/training_stats_e' + str(load_epoch) + '.npy')
model = models.get_model_from_id(model_id, identifier, batch_size, vocab_size, load_epoch=load_epoch)
assert (model is not None), "model is None"
model = models.compile_model(model_id, model)
dataset.get_dataset_statistics(log)
dataset_val.get_dataset_statistics(log)
# TRAIN LOOP
start_time = time.clock()
fig = None
max_feat_val = -1
best_model = None
best_a = -1
best_e = None
for e in range(load_epoch + 1, nb_epoch + 1):
# refresh log every epoch
log = refresh_log(log, model_path)
print_n_log(log, "\n\n--------------------------------------------\nepoch %d\n--------------------------------------------\n" % e)
# Reset and Randomise the dataset per epoch
dataset.reset()
dataset.randomise()
# if extra_path is not None:
# if max_feat_val < 0:
# min_feat_len = 1000000
# min_feat_val = 1000000
# max_feat_val = -1
# for line in lines:
# X_sample = np.load(
# feature_path + vis_model_id + '/' + vis_layer_id + '/npy/' + line.rstrip().split('\t')[0] + '.npy')
#
# min_feat_len = min(min_feat_len, np.shape(X_sample)[0])
# min_feat_val = min(min_feat_val,np.min(X_sample))
# max_feat_val = max(max_feat_val,np.max(X_sample))
#
# with open(extra_path) as f:
# extra_lines = f.readlines() # read the lines into an array
# random.shuffle(extra_lines)
#
# extra_lines = extra_lines[:1000]
#
# log.write('EXTRA LINES INCLUDED: '+extra_path+'\n')
# print('EXTRA LINES INCLUDED: '+extra_path)
# print('Will train on these first')
# X_batch = []
# Y_batch = []
# count = 0
# past = 0
# inner_count = 0
# sum_loss = 0
# start_time_inner = time.clock()
# for line in extra_lines:
# count += 1
# if model_id in ['MSK_10_01','MSK_10_02']:
# X_sample = np.random.rand(randint(min_feat_len, max_feat_len - 1), fs) * (
# max_feat_val - min_feat_val) + min_feat_val # RANDOM X SIMILAR TO NORMAL X
#
# elif int(model_id[8:9]) == 1:
# X_sample = np.random.rand(randint(min_feat_len, max_feat_len-1), fs)*(max_feat_val-min_feat_val)+min_feat_val # RANDOM X SIMILAR TO NORMAL X
# elif int(model_id[8:9]) == 2:
# X_sample = np.zeros((randint(min_feat_len, max_feat_len-1), fs)) # ZEROS
# Y_sample = []
# Y_sample.append(words['<BOS>'])
# for word in line.rstrip().split('\t')[1].split():
# if word in words.keys():
# Y_sample.append(words[word])
# else:
# Y_sample.append(words['<UNK>'])
# Y_sample.append(words['<EOS>'])
#
# # Check lengths of sequences to be sure they lower than current maxs (check for train val and test)
# # if np.shape(X_sample)[0] > max_feat_len:
# # print np.shape(X_sample)[0]
# # if np.shape(Y_sample)[0] > max_sent_len:
# # print np.shape(Y_sample)[0]
#
# X_batch.append(X_sample)
# Y_batch.append(Y_sample)
#
# if (count % batch_size == 0) or (count == len(extra_lines)):
# X_batch = pad_sequences(X_batch, maxlen=max_feat_len, padding='pre')
# Y_batch = pad_sequences(Y_batch, maxlen=max_sent_len, padding='post')
#
# # h = model.fit(X_batch, Y_batch, batch_size=batch_size, nb_epoch=1)
# loss = model.train_on_batch(X_batch, Y_batch)
# inner_count += 1
# sum_loss+=loss
#
# X_batch = []
# Y_batch = []
# if (int((float(count) / len(extra_lines)) * 100) > past) or (count == len(extra_lines)):
# tr = (len(extra_lines) - count) / ((count) / (time.clock() - start_time_inner))
# trt = ((nb_epoch - e + 1) * len(extra_lines) - count) / (
# ((e - 1) * len(extra_lines) + count) / (time.clock() - start_time))
# print('(%d) [%.5f] Image: %d / %d; Epoch TR: %02d:%02d:%02d; Total TR: %02d:%02d:%02d;' % (
# past, sum_loss / inner_count, count, len(extra_lines), int((tr / 60) / 60),
# int((tr / 60) % 60),
# int(tr % 60),
# int((trt / 60) / 60), int((trt / 60) % 60), int(trt % 60)))
# log.write(
# '\n(%d) [%.5f] Image: %d / %d; Epoch TR: %02d:%02d:%02d; Total TR: %02d:%02d:%02d;' % (
# past, sum_loss / inner_count, count, len(extra_lines), int((tr / 60) / 60),
# int((tr / 60) % 60),
# int(tr % 60),
# int((trt / 60) / 60), int((trt / 60) % 60), int(trt % 60)))
#
# # log.close()
# # log = open(model_path + '/log.txt', "a")
# # log.write('\n(%d) [%.5f] Image: %d / %d; Epoch TR: %02d:%02d:%02d; Total TR: %02d:%02d:%02d;' % (
# # past, sum_loss / inner_count, count, len(all_paths), int((tr / 60) / 60), int((tr / 60) % 60),
# # int(tr % 60),
# # int((trt / 60) / 60), int((trt / 60) % 60), int(trt % 60)))
#
# past += 10
# sum_loss = 0
# inner_count = 0
# log.write('\n--------------------------------------------')
# print('\n--------------------------------------------')
past = 0
epoch_start_time = time.clock()
# BATCH LOOP
num_of_samples = dataset.number_of_samples()
num_of_batches = int(math.floor(num_of_samples / float(batch_size)))
sample_count = 0
sum_loss = 0
sum_count = 0
c = 0
for batch_count in range(0, num_of_batches):
c += 1
# Get data per batch
x, y, sid = dataset.get_batch_xy(True)
sample_count += len(y)
# print(c)
loss = model.train_on_batch(x, y)
# Sums since last print
sum_loss += loss
sum_count += 1
if (int((float(batch_count) / num_of_batches) * 100) > past) or (batch_count == num_of_batches - 1):
etr = (num_of_samples - sample_count) * ((time.clock() - epoch_start_time) / float(sample_count))
ttr = ((nb_epoch - e + 1) * num_of_samples - sample_count) / (
((e - 1) * num_of_samples + sample_count) / (time.clock() - training_start_time))
log = refresh_log(log, model_path)
print_n_log(log,
"\n%d .. [loss: %.5f] Batch: %d / %d; Image: %d / %d; Epoch TR: %02d:%02d:%02d; Total TR: %02d:%02d:%02d;" % (
past, sum_loss / sum_count, batch_count, num_of_batches, sample_count, num_of_samples,
int((etr / 60) / 60), int((etr / 60) % 60),
int(etr % 60),
int((ttr / 60) / 60), int((ttr / 60) % 60), int(ttr % 60)))
t_l[0].append((e - 1) + past * .01)
t_l[1].append(sum_loss / sum_count)
past += 10
sum_loss = 0
sum_count = 0
tr = (nb_epoch - e) / (e / (time.clock() - start_time))
print('TR: %02d:%02d:%02d;' % (int((tr / 60) / 60), int((tr / 60) % 60), int(tr % 60)))
####################################################################
if e % 1 == 0:
# l, a = get_meteor_stats(model_id, model, 'train', embedded)
l = 0
a = 0
            # TODO: hard-coded placeholder; comment out when training with the extra sentences
t_l[0].append(e)
t_l[1].append(l)
t_a[0].append(e)
t_a[1].append(a)
l, a = get_meteor_stats(model_id, model, 'val', embedded)
v_l[0].append(e)
v_l[1].append(l)
v_a[0].append(e)
v_a[1].append(a)
print_n_log(log, '\n -- Val: METEOR --')
print_n_log(log, '\n' + str(a))
print_n_log(log, '\n -----------------\n')
if a > best_a:
best_a = a
best_e = e
best_model = model
# # graph it
# if fig:
# plt.close()
# fig, ax1 = plt.subplots()
# ax1.plot(t_la[0], t_la[1], 'g-')
# # ax1.plot(t_l[0], t_l[1], 'b-')
# # ax1.plot(v_l[0], v_l[1], 'r-')
# ax2 = ax1.twinx()
# ax2.plot(t_a[0], t_a[1], 'b--')
# ax2.plot(v_a[0], v_a[1], 'r--')
# # plt.plot(t_l[0], t_l[1])
# # plt.plot(v_l[0],v_l[1])
# # plt.plot(v_a[0],v_a[1])
# plt.show(block=False)
if e % 1 == 0:
model.save_weights(model_path + '/' + model_id + '_' + identifier + '-e' + str(e) + '_weights.h5', overwrite=True)
# fig.savefig(model_path + '/training.png')
# fig.savefig(model_path + '/training.pdf')
np.save(model_path + '/training_stats_e' + str(e) + '.npy', [t_la, t_l, t_a, v_l, v_a])
results_path = model_path + '/RESULTS/'
if not os.path.exists(results_path):
os.makedirs(results_path)
splits = ['val','test']
for split in splits:
print(split)
loss, scores, ref, hypo = rnn_test.test(split, model_id, model, embedded)
output = "%f\n%s\n" % (loss, str(scores))
for k in ref:
output += '________________________\n'
output += k + '\n'
output += ref[k][0] + '\n'
output += hypo[k][0] + '\n'
with open(results_path + split + '_' + str(e) + '.txt', 'w') as f:
f.write(output)
if e == nb_epoch:
print(output)
# save best
print_n_log(log, '\nBest Epoch: '+str(best_e)+' with val METEOR of '+str(best_a))
best_model.save_weights(model_path + '/' + model_id + '_BEST_e' + str(best_e) + '.h5', overwrite=True)
print_n_log(log, '\nTraining Finished\n')
log.close()
if __name__ == "__main__":
model_id = 'MVTK_02_02' # 'GRU_10_02_6_01'
identifier = '00003'
nb_epoch = 20
batch_size = 16
model_path = DRIVE + 'MODELS/SENTENCE/KERAS/'
load_epoch = 0
# extra_path = DRIVE + 'DATASETS/VIDEO/TENNIS/COMMENTARY/ALIGNED/EXTRA/001-045.txt'
extra_path = None
model = train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch, extra_path, embedded=False)
model_id = 'MVTK_12_02'
model = train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch, extra_path, embedded=False)
model_id = 'MVTK_22_02'
model = train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch, extra_path, embedded=False)
# model_id = 'MVTK_22_01'
# model = train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch, extra_path, embedded=False)
# model_id = 'MVTK_22_03'
# model = train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch, extra_path, embedded=False)
# model_id = 'MVTK_22_04'
# model = train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch, extra_path, embedded=False)
# model_id = 'MVTK_22_05'
# model = train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch, extra_path, embedded=False)
# model_id = 'MVTK_22_06'
# model = train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch, extra_path, embedded=False)
# model_id = 'MVTK_03_01'
# model = train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch, extra_path, embedded=True)
# model_id = 'MVTK_03_02'
# model = train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch, extra_path, embedded=True)
# model_id = 'MVTK_03_03'
# model = train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch, extra_path, embedded=True)
# model_id = 'MVTK_03_04'
# model = train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch, extra_path, embedded=True)
# model_id = 'MVTK_03_05'
# model = train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch, extra_path, embedded=True)
# model_id = 'MVTK_03_06'
# model = train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch, extra_path, embedded=True)
| mit |
arcolife/picTagular | compute_histograms.py | 1 | 2730 | """
Ref: http://www.pyimagesearch.com/2014/01/22/clever-girl-a-guide-to-utilizing-color-histograms-for-computer-vision-and-image-search-engines/
- Load an image
- compute different histograms.
Example:
    $ python compute_histograms.py -i corpus/training/1005394_10153724055540103_1684805428_n.jpg
.... Keep pressing any key to proceed with images.
.... Close histograms to proceed with plots.
"""
# import the necessary packages
from matplotlib import pyplot as plt
import numpy as np
import argparse
import cv2
def load(args):
# load the image and show it
image = cv2.imread(args["image"])
cv2.imshow("image", image)
cv2.waitKey()
return image
def compute_grayscale(image):
# convert the image to grayscale and create a histogram
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow("gray", gray)
cv2.waitKey()
hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
plt.figure()
plt.title("Grayscale Histogram")
plt.xlabel("Bins")
plt.ylabel("# of Pixels")
plt.plot(hist)
plt.xlim([0, 256])
plt.show()
def compute_flattened_colorhistogram(image):
# grab the image channels, initialize the tuple of colors,
# the figure and the flattened feature vector
chans = cv2.split(image)
colors = ("b", "g", "r")
plt.figure()
plt.title("'Flattened' Color Histogram")
plt.xlabel("Bins")
plt.ylabel("# of Pixels")
features = []
# loop over the image channels
for (chan, color) in zip(chans, colors):
# create a histogram for the current channel and
# concatenate the resulting histograms for each
# channel
hist = cv2.calcHist([chan], [0], None, [256], [0, 256])
features.extend(hist)
# plot the histogram
plt.plot(hist, color = color)
plt.xlim([0, 256])
# here we are simply showing the dimensionality of the
# flattened color histogram 256 bins for each channel
# x 3 channels = 768 total values -- in practice, we would
# normally not use 256 bins for each channel, a choice
# between 32-96 bins are normally used, but this tends
# to be application dependent
print "flattened feature vector size: %d" % (np.array(features).flatten().shape)
plt.show()
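# A minimal sketch (not part of the original script) of the reduced-bin variant
# mentioned above -- 32 bins per channel would give a 96-dimensional feature vector:
#   hist32 = cv2.calcHist([chan], [0], None, [32], [0, 256])
# where `chan` is one of the channels returned by cv2.split(image).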
if __name__=='__main__':
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image",
required = True,
help = "Path to the image")
args = vars(ap.parse_args())
# load the image
image = load(args)
# compute the grayscale
compute_grayscale(image)
# compute the flattened color histogram
compute_flattened_colorhistogram(image)
| mit |
AlexanderFabisch/scikit-learn | examples/tree/unveil_tree_structure.py | 67 | 4824 | """
=========================================
Understanding the decision tree structure
=========================================
The decision tree structure can be analysed to gain further insight on the
relation between the features and the target to predict. In this example, we
show how to retrieve:
- the binary tree structure;
- the depth of each node and whether or not it's a leaf;
- the nodes that were reached by a sample using the ``decision_path`` method;
- the leaf that was reached by a sample using the apply method;
- the rules that were used to predict a sample;
- the decision path shared by a group of samples.
"""
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
estimator = DecisionTreeClassifier(max_leaf_nodes=3, random_state=0)
estimator.fit(X_train, y_train)
# The decision estimator has an attribute called tree_ which stores the entire
# tree structure and allows access to low level attributes. The binary tree
# tree_ is represented as a number of parallel arrays. The i-th element of each
# array holds information about the node `i`. Node 0 is the tree's root. NOTE:
# Some of the arrays only apply to either leaves or split nodes, resp. In this
# case the values of nodes of the other type are arbitrary!
#
# Among those arrays, we have:
# - left_child, id of the left child of the node
# - right_child, id of the right child of the node
# - feature, feature used for splitting the node
# - threshold, threshold value at the node
#
# Using those arrays, we can parse the tree structure:
n_nodes = estimator.tree_.node_count
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
feature = estimator.tree_.feature
threshold = estimator.tree_.threshold
# The tree structure can be traversed to compute various properties such
# as the depth of each node and whether or not it is a leaf.
node_depth = np.zeros(shape=n_nodes, dtype=np.int64)
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)] # seed is the root node id and its parent depth
while len(stack) > 0:
node_id, parent_depth = stack.pop()
node_depth[node_id] = parent_depth + 1
# If we have a test node
if (children_left[node_id] != children_right[node_id]):
stack.append((children_left[node_id], parent_depth + 1))
stack.append((children_right[node_id], parent_depth + 1))
else:
is_leaves[node_id] = True
print("The binary tree structure has %s nodes and has "
"the following tree structure:"
% n_nodes)
for i in range(n_nodes):
if is_leaves[i]:
print("%snode=%s leaf node." % (node_depth[i] * "\t", i))
else:
print("%snode=%s test node: go to node %s if X[:, %s] <= %ss else to "
"node %s."
% (node_depth[i] * "\t",
i,
children_left[i],
feature[i],
threshold[i],
children_right[i],
))
print()
# First let's retrieve the decision path of each sample. The decision_path
# method allows to retrieve the node indicator functions. A non zero element of
# indicator matrix at the position (i, j) indicates that the sample i goes
# through the node j.
node_indicator = estimator.decision_path(X_test)
# Similarly, we can also have the leaves ids reached by each sample.
leave_id = estimator.apply(X_test)
# Now, it's possible to get the tests that were used to predict a sample or
# a group of samples. First, let's make it for the sample.
sample_id = 0
node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
node_indicator.indptr[sample_id + 1]]
print('Rules used to predict sample %s: ' % sample_id)
for node_id in node_index:
    # skip leaf nodes: they carry no decision rule
    if leave_id[sample_id] == node_id:
        continue
if (X_test[sample_id, feature[node_id]] <= threshold[node_id]):
threshold_sign = "<="
else:
threshold_sign = ">"
print("decision id node %s : (X[%s, %s] (= %s) %s %s)"
% (node_id,
sample_id,
feature[node_id],
             X_test[sample_id, feature[node_id]],
threshold_sign,
threshold[node_id]))
# For a group of samples, we have the following common node.
sample_ids = [0, 1]
common_nodes = (node_indicator.toarray()[sample_ids].sum(axis=0) ==
len(sample_ids))
common_node_id = np.arange(n_nodes)[common_nodes]
print("\nThe following samples %s share the node %s in the tree"
% (sample_ids, common_node_id))
print("It is %s %% of all nodes." % (100 * len(common_node_id) / n_nodes,))
| bsd-3-clause |
henningjp/CoolProp | dev/TTSE/check_TTSE_old.py | 2 | 3308 | from CoolProp.Plots import Ph
import CoolProp
import CoolProp.CoolProp as CP
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
import matplotlib.ticker
import numpy as np
import random
fig = plt.figure(figsize=(10, 5))
ax1 = fig.add_axes((0.08, 0.1, 0.32, 0.83))
ax2 = fig.add_axes((0.50, 0.1, 0.32, 0.83))
Ref = 'R245fa'
BICUBIC = CoolProp.AbstractState('BICUBIC&HEOS', Ref)
TTSE = CoolProp.AbstractState('TTSE&HEOS', Ref)
EOS = CoolProp.AbstractState('HEOS', Ref)
MM = EOS.molar_mass()
print(MM)
T = np.linspace(CP.PropsSI(Ref, 'Tmin') + 0.1, CP.PropsSI(Ref, 'Tcrit') - 0.01, 300)
pV = CP.PropsSI('P', 'T', T, 'Q', 1, Ref)
hL = CP.PropsSI('Hmolar', 'T', T, 'Q', 0, Ref)
hV = CP.PropsSI('Hmolar', 'T', T, 'Q', 1, Ref)
HHH1, PPP1, EEE1 = [], [], []
HHH2, PPP2, EEE2 = [], [], []
cNorm = colors.LogNorm(vmin=1e-12, vmax=10)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=plt.get_cmap('jet'))
for a_useless_counter in range(40000):
h = random.uniform(150000 * MM, 590000 * MM)
p = 10**random.uniform(np.log10(100000), np.log10(7000000))
CP.set_debug_level(0)
try:
EOS.update(CoolProp.HmolarP_INPUTS, h, p)
rhoEOS = EOS.rhomolar(); TEOS = EOS.T()
TTSE.update(CoolProp.HmolarP_INPUTS, h, p)
rhoTTSE = TTSE.rhomolar(); TTTSE = TTSE.T()
BICUBIC.update(CoolProp.HmolarP_INPUTS, h, p)
rhoBICUBIC = BICUBIC.rhomolar(); TBICUBIC = BICUBIC.T()
errorTTSE = abs(rhoTTSE / rhoEOS - 1) * 100
errorBICUBIC = abs(rhoBICUBIC / rhoEOS - 1) * 100
if errorTTSE > 100 or errorTTSE < 1e-12:
print("%s %s %s" % (h, p, errorTTSE))
HHH1.append(h)
PPP1.append(p)
EEE1.append(errorTTSE)
HHH2.append(h)
PPP2.append(p)
EEE2.append(errorBICUBIC)
except ValueError as VE:
print('ERROR %s' % VE)
pass
print('done')
SC1 = ax1.scatter(HHH1, PPP1, s=8, c=EEE1, edgecolors='none', cmap=plt.get_cmap('jet'), norm=cNorm)
SC2 = ax2.scatter(HHH2, PPP2, s=8, c=EEE2, edgecolors='none', cmap=plt.get_cmap('jet'), norm=cNorm)
ax1.set_title('Error in Density from TTSE')
ax2.set_title('Error in Density from Bicubic')
for ax in [ax1, ax2]:
ax.set_xlim(250000 * MM, 550000 * MM)
ax.set_ylim(100000, 7000000)
ax.set_yscale('log')
ticks = [100000, 200000, 400000, 600000, 800000, 1000000, 2000000, 4000000, 6000000]
labels = [str(tick) for tick in ticks]
ax.set_yticks(ticks)
ax.set_yticklabels(labels)
ax.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ticks = [150000 * MM, 250000 * MM, 350000 * MM, 450000 * MM, 550000 * MM]
labels = [str(tick) for tick in ticks]
ax.set_xticks(ticks)
ax.set_xticklabels(labels)
ax.tick_params(axis='y', which='minor', left='off')
ax.set_xticklabels(ax.get_xticks() / MM / 1e3)
ax.set_xlabel('Enthalpy [kJ/kg]')
ax.set_yticklabels(ax.get_yticks() / 10**3)
ax.set_ylabel('Pressure [kPa]')
ax.plot(hL, pV, 'k', lw=4)
ax.plot(hV, pV, 'k', lw=4)
cbar_ax = fig.add_axes([0.85, 0.15, 0.06, 0.7])
CB = fig.colorbar(SC1, cax=cbar_ax)
CB.set_label(r'$(\rho/\rho_{EOS}-1)\times 100$ [%]')
plt.savefig('TTSE_BICUBIC.png', dpi=300, transparent=True)
plt.savefig('TTSE_BICUBIC.pdf')
plt.close()
| mit |
mainyanim/eyetoai | findings/jsonify.py | 1 | 13398 | import pandas as pd
import random
import json
from openpyxl import load_workbook
import pymongo
# define values check and append to arr
# define probability array
# read excel
df = pd.read_excel("output.xlsx")
wb = load_workbook('output.xlsx')
ws = wb.get_sheet_by_name('Sheet1') # Define worksheet
def get_dic_from_two_lists(keys, values):
try:
return {keys[i]: values[i] for i in range(len(keys))}
except IndexError:
pass
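# Example (illustration only): get_dic_from_two_lists(['shape', 'margin'], ['oval', 'spiculated'])
# returns {'shape': 'oval', 'margin': 'spiculated'}; if values is shorter than keys the
# IndexError is swallowed and None is returned.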
# Define function to normalize arr values
def normalize(items):
    problist = [x / sum(items) for x in items]
    return problist
# def probslist
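# concatvals(row, start, stop): for the given spreadsheet row, read the cells in
# columns [start, stop); each cell may hold several comma-separated values, and the
# column header (assumed to be a numeric probability) is used as the weight of every
# value found in that column. One value is drawn at random with those weights.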
def concatvals(row, start, stop):
prob_head = list(df)[start:stop]
width = stop - start
col = start
val_arr = []
prob_arr = []
for i in range(width):
value_temp = df.iloc[row - 2, col]
if isinstance(value_temp, float) is False:
value = [x.strip() for x in value_temp.split(',')]
len_val = len(value)
prob_arr += [prob_head[i] for _ in range(len_val)]
val_arr += value[0:len_val]
col += 1
randparameter = (", ".join(random.choices(val_arr, prob_arr, k=1)))
return randparameter
def grab_data(r, s, x, y):
ps = [concatvals(r + s, x, y)]
return ps
def create_rep(arr, row_data, fname, modality): # get findings
params = []
# to_json = []
if fname == 'mass' and modality == 'Mammography':
for i in range(len(arr)):
try:
params += grab_data(row_data, 0, 14, 19)
row_data += 1
except IndexError:
continue
elif fname == 'calcifications' and modality == 'Mammography':
for i in range(len(arr)):
params += grab_data(row_data, 3, 14, 19)
row_data += 1
elif fname == 'assymetry' and modality == 'Mammography':
for i in range(len(arr)):
params += grab_data(row_data, 6, 14, 19)
row_data += 1
elif fname == 'lymphNodes' and modality == 'Mammography':
for i in range(len(arr)):
params += grab_data(row_data, 7, 14, 19)
row_data += 1
elif fname == 'mass' and modality == 'US':
for i in range(len(arr)):
params += grab_data(row_data, 8, 14, 19)
row_data += 1
elif fname == 'calcificationsUs' and modality == 'US':
for i in range(len(arr)):
params += grab_data(row_data, 12, 14, 19)
row_data += 1
elif fname == 'lymphNodes' and modality == 'US':
for i in range(len(arr)):
params += grab_data(row_data, 13, 14, 19)
row_data += 1
elif fname == 'specialCases' and modality == 'US':
for i in range(len(arr)):
params += grab_data(row_data, 14, 14, 19)
row_data += 1
elif fname == 'mass' and modality == 'MRI':
for i in range(len(arr)):
params += grab_data(row_data, 15, 14, 19)
row_data += 1
elif fname == 'mriFeatures' and modality == 'MRI':
for i in range(len(arr)):
params += grab_data(row_data, 18, 14, 19)
row_data += 1
elif fname == 'kineticCurveAssessment' and modality == 'MRI':
for i in range(len(arr)):
params += grab_data(row_data, 19, 14, 19)
row_data += 1
elif fname == 'nonMassEnhancement(NME)' and modality == 'MRI':
for i in range(len(arr)):
params += grab_data(row_data, 20, 14, 19)
row_data += 1
elif fname == 'nonEnhancingFindings' and modality == 'MRI':
for i in range(len(arr)):
params += grab_data(row_data, 22, 14, 19)
row_data += 1
elif fname == 'lymphNodes' and modality == 'MRI':
for i in range(len(arr)):
params += grab_data(row_data, 22, 14, 19)
row_data += 1
elif fname == 'fatContainingLesions' and modality == 'MRI':
for i in range(len(arr)):
params += grab_data(row_data, 24, 14, 19)
row_data += 1
fs = get_dic_from_two_lists(arr, params)
return fs
def get_name(infile):
with open(infile, 'r') as f:
contents_of_file = f.read()
lines = contents_of_file.splitlines()
line_number = random.randrange(0, len(lines))
person_name = lines[line_number]
return person_name
def get_numcond():
names = len(df.Name.unique())
return names
def get_cond_name():
name_arr = df.Name.unique()
n = list(name_arr)
n_arr = []
for i in range(len(name_arr)):
if (isinstance(n[i], float)) is False:
n_arr += [n[i]]
rand_cond_name = random.choice(n_arr)
return rand_cond_name
def camelCase(st):
output = ''.join(x for x in st.title() if x.isalpha())
return output[0].lower() + output[1:]
class AutoTree(dict):
def __missing__(self, key):
value = self[key] = type(self)()
return value
def check_row(cond_name):
from xlrd import open_workbook
book = open_workbook("output.xlsx")
for sheet in book.sheets():
for rowidx in range(sheet.nrows):
row = sheet.row(rowidx)
for colidx, cell in enumerate(row):
if cell.value == cond_name:
return rowidx + 1
def get_birad(row, col, width):
val_head = list(df)[2:9]
val_arr = []
prob_arr = []
for i in range(width):
value = df.iloc[row - 2, col]
val_arr += [val_head[i]]
prob_arr += [value]
col += 1
randp = (", ".join(random.choices(val_arr, prob_arr, k=1)))
return randp
# Create random with parameter of report numbers
def generate_report(infile, items):
for c in range(items):
filename = 'report' + str(c) + '.json'
name = get_cond_name()
row = check_row(name)
# Read BiRads Probabilities into list
# Read BiRads into list
person_name = get_name(infile)
p_id = random.randrange(100)
p_age = random.randrange(25, 65)
num_cond = random.randrange(1, 5)
"create list of values and slice empty entities from list"
rm = df['Relevant modalities'].values.tolist()[0:26]
r = 'Mammography'
#r = random.choice(rm)
# mammo params
report = {}
report['patient_id'] = p_id
report['patient_name'] = person_name
report['relevantModality'] = r
report['conditions_number'] = num_cond
report['conditions'] = []
if r == 'Mammography':
f_temp = df['Relevant findings'].values.tolist()[0:8]
f_list = [x for i, x in enumerate(f_temp) if i == f_temp.index(x)]
f_num_total = len(f_list)
f_rand = random.randrange(1, f_num_total + 1)
iter_params_mass = ['shape', 'margin', 'density']
iter_params_calc = ['typicallyBenign', 'suspiciousMorphology', 'distribution']
iter_params_a = ['assymetry']
iter_params_lymph = ['lymphNodes']
#print(f)
for k in range(num_cond):
#br = get_birad(row, 2, 7)
cond = camelCase(get_cond_name())
f_togo = [random.choice(f_list) for _ in range(f_rand)]
f = camelCase(random.choice(f_togo))
if f == 'mass':
rep_temp = create_rep(iter_params_mass, row, f, r)
report['conditions'] += [{'condition_name': cond,
'condition_details': [
{'relevant_finding': [{'finding_name': f,
'finding_parameters': rep_temp}
]}
]
}]
elif f == 'calcifications':
rep_temp = create_rep(iter_params_calc, row, f, r)
report['conditions'] += [{'condition_name': cond,
'condition_details': [
{'relevant_finding': [{'finding_name': f,
'finding_parameters': rep_temp}
]}
]
}]
json_t = json.dumps(report, indent=2)
print(json_t)
""""
elif f == 'assymetry':
rep_temp = create_rep(iter_params_a, row, f, r)
findings[cond]['relevantFinding'] += [{f:rep_temp}]
pass
elif f == 'lymphNodes':
rep_temp = create_rep(iter_params_lymph, row, f, r)
findings[cond]['relevantFinding'] += [{f:rep_temp}]
pass
with open(filename, 'w') as f:
json.dump(findings, f, indent = 4)
elif r == 'US':
f_temp = df['Relevant findings'].values.tolist()[8:15]
f_list = [x for i, x in enumerate(f_temp) if i == f_temp.index(x)]
f_num_total = len(f_list)
f_rand = random.randrange(1, f_num_total + 1)
us_params_mass = ['shape', 'margin', 'echo', 'posterior']
us_params_calc = ['calcifications']
us_params_l_nodes = ['lymphNodes']
us_params_sp_cases = ['specialCases']
for i in range(num_cond):
br = get_birad(row, 2, 7)
cond = camelCase(get_cond_name())
findings[cond]['biRad'] = br
findings[cond]['relevantFinding'] = []
# f = 'mass'
for k in range(f_rand + 1):
f = camelCase(random.choice(f_list))
if f == 'mass':
rep_temp = create_rep(us_params_mass, row, f, r)
findings[cond]['relevantFinding'] += [{f: rep_temp}]
elif f == 'calcificationsUs':
rep_temp = create_rep(us_params_calc, row, f, r)
findings[cond]['relevantFinding'] += [{f: rep_temp}]
elif f == 'lymphNodes':
rep_temp = create_rep(us_params_l_nodes, row, f, r)
findings[cond]['relevantFinding'] += [{f: rep_temp}]
else:
rep_temp = create_rep(us_params_sp_cases, row, f, r)
findings[cond]['relevantFinding'] += [{f: rep_temp}]
with open(filename, 'w') as f:
json.dump(findings, f, indent = 4)
elif r == 'MRI':
f_temp = df['Relevant findings'].values.tolist()[15:25]
f_list = [x for i, x in enumerate(f_temp) if i == f_temp.index(x)]
f_num_total = len(f_list)
f_rand = random.randrange(1, f_num_total + 1)
mri_params_mass = ['shape', 'margin', 'internalEnhancement']
mri_params_mri_f = ['mriFeatures']
mri_params_kin_c_a = ['kineticCurveAssessment']
mri_params_nme = ['distribution', 'internalEnhancementPatterns']
mri_params_nef = ['nonEnhancingPatterns']
mri_params_l_nodes = ['lymphNodes']
mri_params_fcl = ['fatContainingLesions']
for i in range(num_cond):
br = get_birad(row, 2, 7)
cond = camelCase(get_cond_name())
findings[cond]['biRad'] = br
findings[cond]['relevantFinding'] = []
# f = 'mass'
for k in range(f_rand + 1):
f = camelCase(random.choice(f_list))
if f == 'mass':
rep_temp = create_rep(mri_params_mass, row, f, r)
findings[cond]['relevantFinding'] += [{f: rep_temp}]
elif f == 'mriFeatures':
rep_temp = create_rep(mri_params_mri_f, row, f, r)
findings[cond]['relevantFinding'] += [{f: rep_temp}]
elif f == 'kineticCurveAssessment':
rep_temp = create_rep(mri_params_kin_c_a, row, f, r)
findings[cond]['relevantFinding'] += [{f: rep_temp}]
elif f == 'nonMassEnhancement(NME)':
rep_temp = create_rep(mri_params_nme, row, f, r)
findings[cond]['relevantFinding'] += [{f: rep_temp}]
elif f == 'nonEnhancingFindings':
rep_temp = create_rep(mri_params_nef, row, f, r)
findings[cond]['relevantFinding'] += [{f: rep_temp}]
elif f == 'lymphNodes':
rep_temp = create_rep(mri_params_l_nodes, row, f, r)
findings[cond]['relevantFinding'] += [{f: rep_temp}]
elif f == 'fatContainingLesions':
rep_temp = create_rep(mri_params_fcl, row, f, r)
findings[cond]['relevantFinding'] += [{f: rep_temp}]
with open(filename, 'w') as f:
json.dump(findings, f, indent = 4)
"""
if __name__ == "__main__":
generate_report("first-names.txt", 1)
| mit |
autoreject/autoreject | examples/plot_visualize_bad_epochs.py | 1 | 5651 | """
===============================
Visualize bad sensors per trial
===============================
This example demonstrates how to use :mod:`autoreject` to
visualize the bad sensors in each trial
"""
# Author: Mainak Jas <[email protected]>
# Denis A. Engemann <[email protected]>
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 2
# %%
# First, we download the data from OpenfMRI which is hosted on OpenNeuro.
# We will do this using ``openneuro-py`` which can be installed using pip
# (``pip install openneuro-py``).
import os
import openneuro
import autoreject
dataset = 'ds000117' # The id code on OpenNeuro for this example dataset
subject_id = 16 # OpenfMRI format of subject numbering
target_dir = os.path.join(
os.path.dirname(autoreject.__file__), '..', 'examples', dataset)
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
openneuro.download(dataset=dataset, target_dir=target_dir,
include=[f'sub-{subject_id}/ses-meg/'])
# %%
# We will create epochs with data starting 200 ms before trigger onset
# and continuing up to 800 ms after that. The data contains visual stimuli for
# famous faces, unfamiliar faces, as well as scrambled faces.
tmin, tmax = -0.2, 0.8
events_id = {'famous/first': 5, 'famous/immediate': 6, 'famous/long': 7}
# %%
# Let us now load all the epochs into memory and concatenate them
import mne # noqa
epochs = list()
for run in range(3, 7):
run_fname = os.path.join(target_dir, f'sub-{subject_id}', 'ses-meg', 'meg',
f'sub-{subject_id}_ses-meg_task-facerecognition'
'_run-{:02d}_meg.fif'.format(run))
raw = mne.io.read_raw_fif(run_fname, preload=True)
raw.pick_types(eeg=True, meg=False, stim=True) # less memory + computation
raw.filter(1., 40., l_trans_bandwidth=0.5, n_jobs=1, verbose='INFO')
raw.set_channel_types({'EEG061': 'eog', 'EEG062': 'eog',
'EEG063': 'ecg', 'EEG064': 'misc'})
raw.rename_channels({'EEG061': 'EOG061', 'EEG062': 'EOG062',
'EEG063': 'ECG063', 'EEG064': 'MISC'})
events = mne.find_events(raw, stim_channel='STI101',
consecutive='increasing',
min_duration=0.003, verbose=True)
# Read epochs
mne.io.set_eeg_reference(raw)
epoch = mne.Epochs(raw, events, events_id, tmin, tmax, proj=True,
baseline=None,
preload=False, reject=None, decim=4)
epochs.append(epoch)
# Same `dev_head_t` for all runs so that we can concatenate them.
epoch.info['dev_head_t'] = epochs[0].info['dev_head_t']
epochs = mne.epochs.concatenate_epochs(epochs)
# %%
# Now, we apply autoreject
from autoreject import AutoReject, compute_thresholds # noqa
this_epoch = epochs['famous']
exclude = [] # XXX
picks = mne.pick_types(epochs.info, meg=False, eeg=True, stim=False,
eog=False, exclude=exclude)
# %%
# Note that :class:`autoreject.AutoReject` by design supports multiple
# channel types. If no picks are passed, separate solutions will be computed
# for each channel type and internally combined. This then readily supports
# cleaning unseen epochs from the different channel types used during fit.
# Here we only use a subset of channels to save time.
# %%
# Also note that once the parameters are learned, any data can be repaired
# that contains channels that were used during fit. This also means that time
# may be saved by fitting :class:`autoreject.AutoReject` on a
# representative subsample of the data.
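# %%
# A minimal sketch of that idea (hypothetical, not executed in this example):
# fit on a random subset of epochs, then clean the full set with the learned
# parameters::
#
#     rng = np.random.RandomState(42)
#     idx = rng.choice(len(this_epoch), size=100, replace=False)
#     ar_sub = AutoReject(picks=picks, random_state=42).fit(this_epoch[idx])
#     epochs_clean = ar_sub.transform(this_epoch)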
ar = AutoReject(picks=picks, random_state=42, n_jobs=1, verbose='tqdm')
epochs_ar, reject_log = ar.fit_transform(this_epoch, return_log=True)
# %%
# We can visualize the cross validation curve over two variables
import numpy as np # noqa
import matplotlib.pyplot as plt # noqa
import matplotlib.patches as patches # noqa
from autoreject import set_matplotlib_defaults # noqa
set_matplotlib_defaults(plt, style='seaborn-white')
loss = ar.loss_['eeg'].mean(axis=-1) # losses are stored by channel type.
plt.matshow(loss.T * 1e6, cmap=plt.get_cmap('viridis'))
plt.xticks(range(len(ar.consensus)), ['%.1f' % c for c in ar.consensus])
plt.yticks(range(len(ar.n_interpolate)), ar.n_interpolate)
# Draw rectangle at location of best parameters
ax = plt.gca()
idx, jdx = np.unravel_index(loss.argmin(), loss.shape)
rect = patches.Rectangle((idx - 0.5, jdx - 0.5), 1, 1, linewidth=2,
edgecolor='r', facecolor='none')
ax.add_patch(rect)
ax.xaxis.set_ticks_position('bottom')
plt.xlabel(r'Consensus percentage $\kappa$')
plt.ylabel(r'Max sensors interpolated $\rho$')
plt.title('Mean cross validation error (x 1e6)')
plt.colorbar()
plt.show()
# %%
# ... and visualize the bad epochs and sensors. Bad sensors which have been
# interpolated are in blue. Bad sensors which are not interpolated are in red.
# Bad trials are also in red.
scalings = dict(eeg=40e-6)
reject_log.plot_epochs(this_epoch, scalings=scalings)
# %%
# ... and the epochs after cleaning with autoreject
epochs_ar.plot(scalings=scalings)
# %%
# The epochs dropped by autoreject are also stored in epochs.drop_log
epochs_ar.plot_drop_log()
# %%
# Finally, the evoked before and after autoreject, for sanity check. We use
# the ``spatial_colors`` argument from MNE as it allows us to see that
# the eyeblinks have not yet been cleaned but the bad channels have been
# repaired.
ylim = dict(eeg=(-15, 15))
epochs.average().plot(ylim=ylim, spatial_colors=True)
epochs_ar.average().plot(ylim=ylim, spatial_colors=True)
| bsd-3-clause |
arasmus/ladder | utils.py | 2 | 5079 | import os
import logging
import numpy as np
import theano
from pandas import DataFrame, read_hdf
from blocks.extensions import Printing, SimpleExtension
from blocks.main_loop import MainLoop
from blocks.roles import add_role
logger = logging.getLogger('main.utils')
def shared_param(init, name, cast_float32, role, **kwargs):
if cast_float32:
v = np.float32(init)
p = theano.shared(v, name=name, **kwargs)
add_role(p, role)
return p
class AttributeDict(dict):
__getattr__ = dict.__getitem__
def __setattr__(self, a, b):
self.__setitem__(a, b)
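# Example (illustration only): AttributeDict allows attribute-style access to dict
# entries, e.g. d = AttributeDict(); d.lr = 0.1; then d['lr'] == d.lr == 0.1.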
class DummyLoop(MainLoop):
def __init__(self, extensions):
return super(DummyLoop, self).__init__(algorithm=None,
data_stream=None,
extensions=extensions)
def run(self):
for extension in self.extensions:
extension.main_loop = self
self._run_extensions('before_training')
self._run_extensions('after_training')
class ShortPrinting(Printing):
def __init__(self, to_print, use_log=True, **kwargs):
self.to_print = to_print
self.use_log = use_log
super(ShortPrinting, self).__init__(**kwargs)
def do(self, which_callback, *args):
log = self.main_loop.log
# Iteration
msg = "e {}, i {}:".format(
log.status['epochs_done'],
log.status['iterations_done'])
# Requested channels
items = []
        for k, channel_sets in self.to_print.iteritems():
            for shortname, vars in channel_sets.iteritems():
if vars is None:
continue
if type(vars) is not list:
vars = [vars]
s = ""
for var in vars:
try:
name = k + '_' + var.name
val = log.current_row[name]
except:
continue
try:
s += ' ' + ' '.join(["%.3g" % v for v in val])
except:
s += " %.3g" % val
if s != "":
items += [shortname + s]
msg = msg + ", ".join(items)
if self.use_log:
logger.info(msg)
else:
print msg
class SaveParams(SimpleExtension):
"""Finishes the training process when triggered."""
def __init__(self, trigger_var, params, save_path, **kwargs):
super(SaveParams, self).__init__(**kwargs)
if trigger_var is None:
self.var_name = None
else:
self.var_name = trigger_var[0] + '_' + trigger_var[1].name
self.save_path = save_path
self.params = params
self.to_save = {}
self.best_value = None
self.add_condition(['after_training'], self.save)
self.add_condition(['on_interrupt'], self.save)
def save(self, which_callback, *args):
if self.var_name is None:
self.to_save = {v.name: v.get_value() for v in self.params}
path = self.save_path + '/trained_params'
logger.info('Saving to %s' % path)
np.savez_compressed(path, **self.to_save)
def do(self, which_callback, *args):
if self.var_name is None:
return
val = self.main_loop.log.current_row[self.var_name]
if self.best_value is None or val < self.best_value:
self.best_value = val
self.to_save = {v.name: v.get_value() for v in self.params}
class SaveExpParams(SimpleExtension):
def __init__(self, experiment_params, dir, **kwargs):
super(SaveExpParams, self).__init__(**kwargs)
self.dir = dir
self.experiment_params = experiment_params
def do(self, which_callback, *args):
df = DataFrame.from_dict(self.experiment_params, orient='index')
df.to_hdf(os.path.join(self.dir, 'params'), 'params', mode='w',
complevel=5, complib='blosc')
class SaveLog(SimpleExtension):
def __init__(self, dir, show=None, **kwargs):
super(SaveLog, self).__init__(**kwargs)
self.dir = dir
self.show = show if show is not None else []
def do(self, which_callback, *args):
df = DataFrame.from_dict(self.main_loop.log, orient='index')
df.to_hdf(os.path.join(self.dir, 'log'), 'log', mode='w',
complevel=5, complib='blosc')
def prepare_dir(save_to, results_dir='results'):
base = os.path.join(results_dir, save_to)
i = 0
while True:
name = base + str(i)
try:
os.makedirs(name)
break
        except OSError:
            i += 1
return name
def load_df(dirpath, filename, varname=None):
varname = filename if varname is None else varname
fn = os.path.join(dirpath, filename)
return read_hdf(fn, varname)
def filter_funcs_prefix(d, pfx):
    pfx = 'cmd_'  # note: the pfx argument is ignored; the prefix is fixed to 'cmd_'
fp = lambda x: x.find(pfx)
return {n[fp(n) + len(pfx):]: v for n, v in d.iteritems() if fp(n) >= 0}
| mit |
justincely/cos_monitoring | cosmo/monitor_helpers.py | 1 | 5642 | import pandas as pd
import numpy as np
import datetime
from itertools import repeat
from astropy.time import Time, TimeDelta
from typing import Union, Tuple, Sequence, List
def convert_day_of_year(date: Union[float, str]) -> Time:
"""Convert day of the year (defined as yyyy.ddd where ddd is the day number of that year) to an astropy Time object.
Some important dates for the COS team were recorded in this format.
"""
return Time(
datetime.datetime.strptime(
f'{date:.3f}' if isinstance(date, float) else date,
'%Y.%j'
),
format='datetime'
)
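# Example (for illustration): convert_day_of_year(2017.169) gives
# Time('2017-06-18 00:00:00'), i.e. the 169th day of 2017.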
def fit_line(x: Sequence, y: Sequence) -> Tuple[np.poly1d, np.ndarray]:
"""Given arrays x and y, fit a line."""
fit = np.poly1d(np.polyfit(x, y, 1))
return fit, fit(x)
def explode_df(df: pd.DataFrame, list_keywords: list) -> pd.DataFrame:
"""If a dataframe contains arrays for the element of a column or columns given by list_keywords, expand the
dataframe to one row per array element. Each row in list_keywords must be the same length.
"""
idx = df.index.repeat(df[list_keywords[0]].str.len()) # Repeat values based on the number of elements in the arrays
unpacked = pd.concat([pd.DataFrame({x: np.concatenate(df[x].values)}) for x in list_keywords], axis=1)
unpacked.index = idx # assigns repeated index to the unpacked dataframe, unpacked.
# Join unpacked df to the original df and drop the old columns
exploded = unpacked.join(df.drop(list_keywords, 1), how='left').reset_index(drop=True)
if exploded.isna().values.any(): # If there are NaNs, then it didn't make sense to "explode" the input df
raise ValueError('Elements in columns to be exploded are not the same length across rows.')
return exploded
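# Example (illustration only): a frame with equal-length array columns is expanded
# to one row per array element, e.g.
#   df = pd.DataFrame({'EXPSTART': [58000.0], 'TIME': [np.array([0, 1, 2])]})
#   explode_df(df, ['TIME'])  ->  three rows with TIME = 0, 1, 2 and EXPSTART repeated.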
def absolute_time(df: pd.DataFrame = None, expstart: Sequence = None, time: Sequence = None, time_key: str = None,
time_format: str = 'sec') -> TimeDelta:
"""Compute the time sequence relative to the start of the exposure (EXPSTART). Can be computed from a DataFrame that
contains an EXPSTART column and some other time array column, or from an EXPSTART array and time array pair.
"""
# If no input is given raise an error
if df is None and expstart is None and time is None:
        raise TypeError('Computing an absolute time requires either a dataframe or a set of arrays')
    # Check that expstart and time are used together (one without the other is an error)
    if (expstart is not None) != (time is not None):
raise TypeError('expstart and time must be used together.')
# Ingest given dataframe if one is given and check that it's not used with arrays at the same time
if df is not None:
if bool(expstart is not None or time is not None):
raise ValueError('Cannot use a dataframe and arrays as input at the same time. Use one or the other.')
expstart = df.EXPSTART
time = df.TIME if not time_key else df[time_key]
zero_points = Time(expstart, format='mjd')
time_delta = TimeDelta(time, format=time_format)
return zero_points + time_delta
def create_visibility(trace_lengths: List[int], visible_list: List[bool]) -> List[bool]:
"""Create visibility lists for plotly buttons. trace_lengths and visible_list must be in the correct order.
:param trace_lengths: List of the number of traces in each "button set".
:param visible_list: Visibility setting for each button set (either True or False).
"""
visibility = [] # Total visibility. Length should match the total number of traces in the figure.
for visible, trace_length in zip(visible_list, trace_lengths):
visibility += list(repeat(visible, trace_length)) # Set each trace per button.
return visibility
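# Example (for illustration): create_visibility([2, 3], [True, False]) returns
# [True, True, False, False, False] -- the first two traces visible, the last three hidden.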
def v2v3(slew_x: Sequence, slew_y: Sequence) -> Tuple[Union[np.ndarray, pd.Series], Union[np.ndarray, pd.Series]]:
"""Detector coordinates to V2/V3 coordinates."""
# If input are lists, convert to np arrays so that the operations are completed as expected
if isinstance(slew_x, list):
slew_x = np.array(slew_x)
if isinstance(slew_y, list):
slew_y = np.array(slew_y)
rotation_angle = np.radians(45.0) # rotation angle in degrees converted to radians
x_conversion = slew_x * np.cos(rotation_angle)
y_conversion = slew_y * np.sin(rotation_angle)
v2 = x_conversion + y_conversion
v3 = x_conversion - y_conversion
return v2, v3
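# The transform above is a 45-degree rotation: with cos(45) = sin(45) = 1/sqrt(2),
# v2 = (x + y) / sqrt(2) and v3 = (x - y) / sqrt(2); e.g. v2v3([1.0], [1.0]) gives
# approximately (array([1.4142]), array([0.])).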
def get_osm_data(datamodel, detector: str) -> pd.DataFrame:
"""Query for OSM data and append any relevant new data to it."""
data = pd.DataFrame()
if datamodel.model is not None:
query = datamodel.model.select().where(datamodel.model.DETECTOR == detector)
# Need to convert the stored array columns back into... arrays
data = data.append(
datamodel.query_to_pandas(
query,
array_cols=[
'TIME',
'SHIFT_DISP',
'SHIFT_XDISP',
'SEGMENT',
'XC_RANGE',
'LAMPTAB_SEGMENT',
'SEARCH_OFFSET',
'FP_PIXEL_SHIFT'
],
),
sort=True,
ignore_index=True
)
if datamodel.new_data is None:
return data
if not datamodel.new_data.empty:
new_data = datamodel.new_data[datamodel.new_data.DETECTOR == detector].reset_index(drop=True)
data = data.append(new_data, sort=True, ignore_index=True)
return data
| bsd-3-clause |
aflaxman/scikit-learn | sklearn/utils/tests/test_class_weight.py | 55 | 9891 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
# Fix exception in error message formatting when missing label is a string
# https://github.com/scikit-learn/scikit-learn/issues/8312
assert_raise_message(ValueError,
'Class label label_not_present not present',
compute_class_weight,
{'label_not_present': 1.}, classes, y)
# Raise error when y has items not in classes
classes = np.arange(2)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
assert_raises(ValueError, compute_class_weight, {0: 1., 1: 2.}, classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
# When the user specifies class weights, compute_class_weights should just
# return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_balanced_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_balanced_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777,
0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "balanced" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
gotomypc/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 244 | 1593 | import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
iris = load_iris()
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(iris.data)
clf.set_params(penalty="l1")
clf.fit(X, iris.target)
X_new = clf.transform(X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, iris.target)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == iris.target), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
clf.fit(iris.data, iris.target)
assert_raises(ValueError, clf.transform, iris.data, "gobbledigook")
assert_raises(ValueError, clf.transform, iris.data, ".5 * gobbledigook")
| bsd-3-clause |
krez13/scikit-learn | sklearn/datasets/samples_generator.py | 20 | 56502 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
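# Editorial sketch (not part of the original module): _generate_hypercube draws
# `samples` distinct binary rows of length `dimensions`; the tiny sizes below
# are illustrative only.
def _demo_generate_hypercube():  # illustrative helper, not library API
    rng = np.random.RandomState(0)
    out = _generate_hypercube(5, 4, rng)
    assert out.shape == (5, 4)
    assert len(set(map(tuple, out))) == 5  # all rows distinct
    return out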
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
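# Editorial sketch (not part of the original module): a minimal, hedged usage
# example for make_classification; the parameter values are illustrative only.
def _demo_make_classification():  # illustrative helper, not library API
    X, y = make_classification(n_samples=200, n_features=10, n_informative=4,
                               n_redundant=2, n_classes=3, random_state=0)
    # 200 samples, 10 features (4 informative + 2 redundant + 4 useless),
    # labels drawn from {0, 1, 2}
    assert X.shape == (200, 10)
    assert set(np.unique(y)) <= {0, 1, 2}
    return X, y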
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
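# Editorial sketch (not part of the original module): a minimal, hedged usage
# example for make_multilabel_classification with the dense indicator output;
# the parameter values are illustrative only.
def _demo_make_multilabel_classification():  # illustrative helper
    X, Y = make_multilabel_classification(n_samples=50, n_features=20,
                                          n_classes=5, n_labels=2,
                                          return_indicator='dense',
                                          random_state=0)
    # X holds word counts, Y is a binary label-indicator matrix
    assert X.shape == (50, 20) and Y.shape == (50, 5)
    assert set(np.unique(Y)) <= {0, 1}
    return X, Y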
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
    # by sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
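# Editorial sketch (not part of the original module): with coef=True and zero
# noise, the returned target is exactly the linear model applied to X; the
# parameter values are illustrative only.
def _demo_make_regression():  # illustrative helper, not library API
    X, y, coef = make_regression(n_samples=100, n_features=8, n_informative=3,
                                 noise=0.0, coef=True, random_state=0)
    assert X.shape == (100, 8) and y.shape == (100,)
    assert np.allclose(y, X.dot(coef))  # bias defaults to 0.0 here
    return X, y, coef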
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
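# Editorial sketch (not part of the original module): with noise=None the two
# classes of make_circles lie exactly on circles of radius 1 and `factor`;
# values below are illustrative only.
def _demo_make_circles():  # illustrative helper, not library API
    X, y = make_circles(n_samples=100, shuffle=False, noise=None, factor=0.5)
    radii = np.sqrt((X ** 2).sum(axis=1))
    assert np.allclose(radii[y == 0], 1.0)   # outer circle
    assert np.allclose(radii[y == 1], 0.5)   # inner circle
    return X, y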
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
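# Editorial sketch (not part of the original module): with noise=0.0 the
# Friedman #1 target can be recomputed from the documented formula, which uses
# only the first five features; values below are illustrative only.
def _demo_make_friedman1():  # illustrative helper, not library API
    X, y = make_friedman1(n_samples=50, n_features=10, noise=0.0,
                          random_state=0)
    y_formula = (10 * np.sin(np.pi * X[:, 0] * X[:, 1])
                 + 20 * (X[:, 2] - 0.5) ** 2
                 + 10 * X[:, 3] + 5 * X[:, 4])
    assert np.allclose(y, y_formula)
    return X, y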
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
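# Editorial sketch (not part of the original module): most of the spectral
# energy of the generated matrix sits in roughly the first few multiples of
# `effective_rank` singular values; thresholds below are illustrative only.
def _demo_make_low_rank_matrix():  # illustrative helper, not library API
    X = make_low_rank_matrix(n_samples=100, n_features=50, effective_rank=5,
                             tail_strength=0.1, random_state=0)
    s = linalg.svd(X, compute_uv=False)
    # the leading 15 (= 3 * effective_rank) values carry most of the energy
    assert (s[:15] ** 2).sum() / (s ** 2).sum() > 0.9
    return X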
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
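# Editorial sketch (not part of the original module): the returned signal is an
# exact product of dictionary and code, and every code column has exactly
# n_nonzero_coefs active entries; the sizes below are illustrative only.
def _demo_make_sparse_coded_signal():  # illustrative helper, not library API
    Y, D, X = make_sparse_coded_signal(n_samples=10, n_components=15,
                                       n_features=8, n_nonzero_coefs=3,
                                       random_state=0)
    assert Y.shape == (8, 10) and D.shape == (8, 15) and X.shape == (15, 10)
    assert np.allclose(Y, np.dot(D, X))
    assert np.all((X != 0).sum(axis=0) == 3)
    return Y, D, X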
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
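# Editorial sketch (not part of the original module): the generated matrix is
# symmetric with strictly positive eigenvalues; n_dim below is illustrative.
def _demo_make_spd_matrix():  # illustrative helper, not library API
    X = make_spd_matrix(n_dim=5, random_state=0)
    assert np.allclose(X, X.T)                # symmetric
    assert np.all(linalg.eigvalsh(X) > 0)     # positive definite
    return X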
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
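# Editorial sketch (not part of the original module): with noise=0.0 the swiss
# roll points satisfy x = t * cos(t) and z = t * sin(t) exactly; values below
# are illustrative only.
def _demo_make_swiss_roll():  # illustrative helper, not library API
    X, t = make_swiss_roll(n_samples=100, noise=0.0, random_state=0)
    assert X.shape == (100, 3) and t.shape == (100,)
    assert np.allclose(X[:, 0], t * np.cos(t))
    assert np.allclose(X[:, 2], t * np.sin(t))
    return X, t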
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
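# Editorial sketch (not part of the original module): classes are assigned by
# distance quantiles, so class sizes come out equal when n_samples divides
# evenly; values below are illustrative only.
def _demo_make_gaussian_quantiles():  # illustrative helper, not library API
    X, y = make_gaussian_quantiles(n_samples=90, n_features=2, n_classes=3,
                                   random_state=0)
    assert X.shape == (90, 2)
    assert np.all(np.bincount(y) == 30)
    return X, y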
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == c for c in range(n_clusters))
cols = np.vstack(col_labels == c for c in range(n_clusters))
return result, rows, cols
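# Editorial sketch (not part of the original module): each returned row/column
# indicator pair marks one constant block of the generated array when noise is
# zero and shuffling is disabled; sizes below are illustrative only.
def _demo_make_biclusters():  # illustrative helper, not library API
    X, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3, noise=0.0,
                                    shuffle=False, random_state=0)
    assert X.shape == (30, 20)
    assert rows.shape == (3, 30) and cols.shape == (3, 20)
    for r, c in zip(rows, cols):
        block = X[r][:, c]
        assert np.allclose(block, block[0, 0])  # constant bicluster
    return X, rows, cols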
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
cols = np.vstack(col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
return result, rows, cols
| bsd-3-clause |
altairpearl/scikit-learn | sklearn/tree/tests/test_tree.py | 7 | 55471 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import struct
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.exceptions import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.utils import compute_sample_weight
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", "mae")
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    assert_equal(s.node_count, d.node_count,
                 "{0}: unequal number of nodes ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
    assert_array_equal(d.children_right, s.children_right,
                       message + ": unequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": unequal children_left")
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)
    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": unequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": unequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": unequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": unequal n_node_samples")
    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": unequal impurity")
    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": unequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
# to numerical instability. Since those instabilities mainly occurs at
# high tree depth, we restrict this maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=.6).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=0.).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=0.0).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=1.1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_impurity_split=-1.0).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_split():
"""Test min_samples_split parameter"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test for integer parameter
est = TreeEstimator(min_samples_split=10,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
# test for float parameter
est = TreeEstimator(min_samples_split=0.2,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
def test_min_samples_leaf():
# Test if leaves contain more than leaf_count training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test integer parameter
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
# test float parameter
est = TreeEstimator(min_samples_leaf=0.1,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_min_impurity_split():
# test if min_impurity_split creates leaves with impurity
# [0, min_impurity_split) when min_samples_leaf = 1 and
# min_samples_split = 2.
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
min_impurity_split = .5
        # without an explicit min_impurity_split, the default threshold should
        # be at most 1e-7, so fitted leaves end up pure
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
random_state=0)
assert_less_equal(est.min_impurity_split, 1e-7,
"Failed, min_impurity_split = {0} > 1e-7".format(
est.min_impurity_split))
est.fit(X, y)
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert_equal(est.tree_.impurity[node], 0.,
"Failed with {0} "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
        # verify leaf nodes have impurity in [0, min_impurity_split] when
        # min_impurity_split is set explicitly
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=0)
est.fit(X, y)
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert_greater_equal(est.tree_.impurity[node], 0,
"Failed with {0}, "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
assert_less_equal(est.tree_.impurity[node], min_impurity_split,
"Failed with {0}, "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
def test_pickle():
for name, TreeEstimator in ALL_TREES.items():
if "Classifier" in name:
X, y = iris.data, iris.target
else:
X, y = boston.data, boston.target
est = TreeEstimator(random_state=0)
est.fit(X, y)
score = est.score(X, y)
fitted_attribute = dict()
for attribute in ["max_depth", "node_count", "capacity"]:
fitted_attribute[attribute] = getattr(est.tree_, attribute)
serialized_object = pickle.dumps(est)
est2 = pickle.loads(serialized_object)
assert_equal(type(est2), est.__class__)
score2 = est2.score(X, y)
assert_equal(score, score2,
"Failed to generate same score after pickling "
"with {0}".format(name))
for attribute in fitted_attribute:
assert_equal(getattr(est2.tree_, attribute),
fitted_attribute[attribute],
"Failed to generate same attribute {0} after "
"pickling with {1}".format(attribute, name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = compute_sample_weight("balanced", unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if not est.presort:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output "balanced" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-3 <= value.flat[0] < 3,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = 8 * struct.calcsize("P")
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Gain testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
# Due to numerical instability of MSE and too strict test, we limit the
# maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
    # Check the various criteria
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # n_samples is set equal to n_features to ease the simultaneous
    # construction of a csr and csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.tree_.decision_path(X1).toarray(),
d.tree_.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
d.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
s.tree_.decision_path(X1).toarray())
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if not TreeEstimator().presort:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
def check_presort_sparse(est, X, y):
assert_raises(ValueError, est.fit, X, y)
def test_presort_sparse():
ests = (DecisionTreeClassifier(presort=True),
DecisionTreeRegressor(presort=True))
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for est, sparse_matrix in product(ests, sparse_matrices):
yield check_presort_sparse, est, sparse_matrix(X), y
def test_decision_path_hardcoded():
X = iris.data
y = iris.target
est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y)
node_indicator = est.decision_path(X[:2]).toarray()
assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]])
def check_decision_path(name):
X = iris.data
y = iris.target
n_samples = X.shape[0]
TreeEstimator = ALL_TREES[name]
est = TreeEstimator(random_state=0, max_depth=2)
est.fit(X, y)
node_indicator_csr = est.decision_path(X)
node_indicator = node_indicator_csr.toarray()
assert_equal(node_indicator.shape, (n_samples, est.tree_.node_count))
    # Assert that leaf indices are correct
leaves = est.apply(X)
leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
    # Ensure only one leaf node per sample
all_leaves = est.tree_.children_left == TREE_LEAF
assert_array_almost_equal(np.dot(node_indicator, all_leaves),
np.ones(shape=n_samples))
# Ensure max depth is consistent with sum of indicator
max_depth = node_indicator.sum(axis=1).max()
assert_less_equal(est.tree_.max_depth, max_depth)
def test_decision_path():
for name in ALL_TREES:
yield (check_decision_path, name)
def check_no_sparse_y_support(name):
X, y = X_multilabel, csr_matrix(y_multilabel)
TreeEstimator = ALL_TREES[name]
assert_raises(TypeError, TreeEstimator(random_state=0).fit, X, y)
def test_no_sparse_y_support():
# Currently we don't support sparse y
for name in ALL_TREES:
yield (check_no_sparse_y_support, name)
def test_mae():
# check MAE criterion produces correct results
# on small toy dataset
dt_mae = DecisionTreeRegressor(random_state=0, criterion="mae",
max_leaf_nodes=2)
    dt_mae.fit([[3], [5], [3], [8], [5]], [6, 7, 3, 4, 3])
assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0/3.0])
assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0])
    dt_mae.fit([[3], [5], [3], [8], [5]], [6, 7, 3, 4, 3],
               [0.6, 0.3, 0.1, 1.0, 0.3])
assert_array_equal(dt_mae.tree_.impurity, [7.0/2.3, 3.0/0.7, 4.0/1.6])
assert_array_equal(dt_mae.tree_.value.flat, [4.0, 6.0, 4.0])
| bsd-3-clause |
yaojingwu1992/XlsxWriter | examples/pandas_chart.py | 9 | 1049 | ##############################################################################
#
# An example of converting a Pandas dataframe to an xlsx file with a chart
# using Pandas and XlsxWriter.
#
# Copyright 2013-2015, John McNamara, [email protected]
#
import pandas as pd
# Create a Pandas dataframe from some data.
df = pd.DataFrame({'Data': [10, 20, 30, 20, 15, 30, 45]})
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter('pandas_chart.xlsx', engine='xlsxwriter')
# Convert the dataframe to an XlsxWriter Excel object.
df.to_excel(writer, sheet_name='Sheet1')
# Get the xlsxwriter workbook and worksheet objects.
workbook = writer.book
worksheet = writer.sheets['Sheet1']
# Create a chart object.
chart = workbook.add_chart({'type': 'column'})
# Configure the series of the chart from the dataframe data.
chart.add_series({'values': '=Sheet1!$B$2:$B$8'})
# Insert the chart into the worksheet.
worksheet.insert_chart('D2', chart)
# Close the Pandas Excel writer and output the Excel file.
writer.save()
| bsd-2-clause |
staeiou/wiki-stat-notebooks | retention_20180712/query/retention.py | 1 | 1500 | import pandas as pd
editors = pd.read_csv('user_reg_with_userpages_all.tsv', delimiter='\t', infer_datetime_format=True)
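# The TSV is expected to provide at least 'user_id' and 'user_registration'
# (MediaWiki-style YYYYMMDDHHMMSS timestamps), since those are the only
# columns used below.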
from datetime import date
def add_years(d, years):
"""Return a date that's `years` years after the date (or datetime)
object `d`. Return the same calendar date (month and day) in the
destination year, if it exists, otherwise use the following day
(thus changing February 29 to March 1).
"""
try:
return d.replace(year = d.year + years)
except ValueError:
return d + (date(d.year + years, 1, 1) - date(d.year, 1, 1))
from datetime import datetime
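# The loop below prints one shell command per editor. Each command runs a SQL
# query (via what is assumed to be a Toolforge-style `sql enwiki` wrapper)
# counting whether the user made at least one revision between one and two
# years after registration, and appends the result to retention.tsv.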
for index, row in editors.iterrows():
user_registration = row['user_registration']
reg_dt = datetime.strptime(str(user_registration), "%Y%m%d%H%M%S")
oneyr = add_years(reg_dt, 1)
twoyr = add_years(reg_dt, 2)
year_mo = reg_dt.strftime("%Y-%m")
query = """sql enwiki " """
query += "select " + str(row['user_id']) + " as user_id, " + str(row['user_registration']) + " as user_registration,"
query += "'" + str(year_mo) + "' as year_mo, "
query += """count(rev_id) as survival_1yr from ( select rev_id from revision_userindex where rev_user = """ + str(row['user_id'])
query += " and rev_timestamp BETWEEN " + oneyr.strftime("%Y%m%d%H%M%S")
query += " and " + twoyr.strftime("%Y%m%d%H%M%S") + " limit 1 ) as s"
query += """ " >> /home/staeiou/wiki-stat-notebooks/retention_20180712/query/retention.tsv """
print(query)
| mit |
wlamond/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 39 | 36062 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.random import choice
from sklearn.utils.testing import (assert_equal, assert_false, assert_true,
assert_not_equal, assert_almost_equal,
assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry, SkipTest)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
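# The small callables above (uppercase, strip_eacute, split_tokenize,
# lazy_analyze) are plugged into the vectorizers below as custom
# preprocessor, tokenizer and analyzer hooks.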
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
    # decode_error defaults to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
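    # smooth_idf adds one to document frequencies, as if an extra document
    # containing every term exactly once had been seen, so no idf term can
    # hit a zero denominator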
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing makes IDF fragile in the presence of features
    # with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
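    # sublinear_tf replaces the raw term frequency tf with 1 + log(tf), hence
    # the strictly increasing but sub-linear growth checked below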
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
        # words that are highly frequent across the complete corpus are
        # likely to be uninformative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
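    # non_negative=True folds the hashed values to their absolute value, so
    # unlike the signed default exercised above, all stored entries here are
    # strictly positive (checked below)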
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
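    # an integer max_df is an absolute document-count threshold (keep terms
    # appearing in at most that many documents), so max_df=1 on these three
    # documents gives the same vocabulary as max_df=0.5 above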
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset, the bigram representation explored last by the
    # grid search performs as well as the rest, since all candidate models
    # converge to 100% accuracy
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset, the bigram representation explored last by the
    # grid search performs as well as the rest, since all candidate models
    # converge to 100% accuracy
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_countvectorizer_vocab_sets_when_pickling():
# ensure that vocabulary of type set is coerced to a list to
# preserve iteration ordering after deserialization
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_set = set(choice(vocab_words, size=5, replace=False,
random_state=rng))
cv = CountVectorizer(vocabulary=vocab_set)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_countvectorizer_vocab_dicts_when_pickling():
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_dict = dict()
words = choice(vocab_words, size=5, replace=False, random_state=rng)
for y in range(0, 5):
vocab_dict[words[y]] = y
cv = CountVectorizer(vocabulary=vocab_dict)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
def test_vectorizer_string_object_as_input():
message = ("Iterable over raw text documents expected, "
"string object received.")
for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]:
assert_raise_message(
ValueError, message, vec.fit_transform, "hello world!")
assert_raise_message(
ValueError, message, vec.fit, "hello world!")
assert_raise_message(
ValueError, message, vec.transform, "hello world!")
| bsd-3-clause |
nanodan/branca | branca/utilities.py | 1 | 13578 | # -*- coding: utf-8 -*-
"""
Utilities
---------
Utility module for Folium helper functions.
"""
from __future__ import absolute_import, division, print_function
import base64
import json
import math
import os
import struct
import zlib
from jinja2 import Environment, PackageLoader
from six import binary_type, text_type
try:
import pandas as pd
except ImportError:
pd = None
try:
import numpy as np
except ImportError:
np = None
rootpath = os.path.abspath(os.path.dirname(__file__))
def get_templates():
"""Get Jinja templates."""
return Environment(loader=PackageLoader('branca', 'templates'))
def legend_scaler(legend_values, max_labels=10.0):
"""
Downsamples the number of legend values so that there isn't a collision
of text on the legend colorbar (within reason). The colorbar seems to
support ~10 entries as a maximum.
"""
if len(legend_values) < max_labels:
legend_ticks = legend_values
else:
spacer = int(math.ceil(len(legend_values)/max_labels))
legend_ticks = []
for i in legend_values[::spacer]:
legend_ticks += [i]
legend_ticks += ['']*(spacer-1)
return legend_ticks
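# Example (illustrative sketch): with 25 legend values and the default
# max_labels=10, spacer == 3, so every third value is kept and each kept
# value is followed by two empty-string placeholders on the colorbar.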
def linear_gradient(hexList, nColors):
"""
    Given a list of hex color codes, will return a list of length
    nColors where the colors are linearly interpolated between the
    given hex values.
    Examples
    --------
    >>> linear_gradient(['#000000', '#ff0000', '#ffff00'], 100)
"""
def _scale(start, finish, length, i):
"""
        Return the correct value of a number that is in between start
        and finish, for use in a loop of length *length*.
"""
base = 16
fraction = float(i) / (length - 1)
raynge = int(finish, base) - int(start, base)
thex = hex(int(int(start, base) + fraction * raynge)).split('x')[-1]
if len(thex) != 2:
thex = '0' + thex
return thex
allColors = []
# Separate (R, G, B) pairs.
for start, end in zip(hexList[:-1], hexList[1:]):
        # Linearly interpolate between pair of hex ###### values and
# add to list.
nInterpolate = 765
for index in range(nInterpolate):
r = _scale(start[1:3], end[1:3], nInterpolate, index)
g = _scale(start[3:5], end[3:5], nInterpolate, index)
b = _scale(start[5:7], end[5:7], nInterpolate, index)
allColors.append(''.join(['#', r, g, b]))
# Pick only nColors colors from the total list.
result = []
for counter in range(nColors):
fraction = float(counter) / (nColors - 1)
index = int(fraction * (len(allColors) - 1))
result.append(allColors[index])
return result
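# Example (illustrative sketch, not from the original source):
# >>> linear_gradient(['#000000', '#ff0000', '#ffff00'], 5)
# returns five hex strings running from black through red to yellow, picked
# evenly from the densely interpolated gradient built above.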
def color_brewer(color_code, n=6):
"""
    Generate a ColorBrewer color scheme with 'n' colors for the scheme named by 'color_code'.
Live examples can be seen at http://colorbrewer2.org/
"""
maximum_n = 253
minimum_n = 3
# Raise an error if the n requested is greater than the maximum.
if n > maximum_n:
raise ValueError('The maximum number of colors in a'
' ColorBrewer sequential color series is 253')
if n < minimum_n:
raise ValueError('The minimum number of colors in a'
' ColorBrewer sequential color series is 3')
if not isinstance(color_code, str):
raise ValueError('color should be a string, not a {}.'
.format(type(color_code)))
if color_code[-2:] == '_r':
base_code = color_code[:-2]
core_color_code = base_code + '_' + str(n).zfill(2)
color_reverse = True
else:
base_code = color_code
core_color_code = base_code + '_' + str(n).zfill(2)
color_reverse = False
with open(os.path.join(rootpath, '_schemes.json')) as f:
schemes = json.loads(f.read())
with open(os.path.join(rootpath, '_cnames.json')) as f:
scheme_info = json.loads(f.read())
with open(os.path.join(rootpath, 'scheme_base_codes.json')) as f:
core_schemes = json.loads(f.read())['codes']
if base_code not in core_schemes:
raise ValueError(base_code + ' is not a valid ColorBrewer code')
try:
schemes[core_color_code]
explicit_scheme = True
except KeyError:
explicit_scheme = False
# Only if n is greater than the scheme length do we interpolate values.
if not explicit_scheme:
# Check to make sure that it is not a qualitative scheme.
if scheme_info[base_code] == 'Qualitative':
matching_quals = []
for key in schemes:
if base_code + '_' in key:
matching_quals.append(int(key.split('_')[1]))
raise ValueError('Expanded color support is not available'
' for Qualitative schemes; restrict the'
' number of colors for the ' + base_code +
' code to between ' + str(min(matching_quals)) +
' and ' + str(max(matching_quals))
)
else:
if not color_reverse:
color_scheme = linear_gradient(schemes.get(core_color_code), n)
else:
color_scheme = linear_gradient(schemes.get(core_color_code)[::-1], n)
else:
if not color_reverse:
color_scheme = schemes.get(core_color_code, None)
else:
color_scheme = schemes.get(core_color_code, None)[::-1]
return color_scheme
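# Example (illustrative sketch, assuming 'YlGn' is among the bundled base
# codes): color_brewer('YlGn', n=9) returns the explicit 'YlGn_09' hex list,
# color_brewer('YlGn_r', n=9) returns the same list reversed, and requesting
# more colors than the scheme defines falls back to linear_gradient
# interpolation (non-qualitative schemes only).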
def split_six(series=None):
"""
Given a Pandas Series, get a domain of values from zero to the 90% quantile
rounded to the nearest order-of-magnitude integer. For example, 2100 is
rounded to 2000, 2790 to 3000.
Parameters
----------
series: Pandas series, default None
Returns
-------
list
"""
if pd is None:
raise ImportError('The Pandas package is required'
' for this functionality')
if np is None:
raise ImportError('The NumPy package is required'
' for this functionality')
def base(x):
if x > 0:
base = pow(10, math.floor(math.log10(x)))
return round(x/base)*base
else:
return 0
quants = [0, 50, 75, 85, 90]
# Some weirdness in series quantiles a la 0.13.
arr = series.values
return [base(np.percentile(arr, x)) for x in quants]
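# Example (illustrative sketch): for a Series whose 0/50/75/85/90th
# percentiles are 0, 210, 1400, 2100 and 2790, split_six returns
# [0, 200, 1000, 2000, 3000].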
def image_to_url(image, colormap=None, origin='upper'):
"""Infers the type of an image argument and transforms it into a URL.
Parameters
----------
image: string, file or array-like object
* If string, it will be written directly in the output file.
        * If file, its content will be base64-encoded and embedded in the
          output file.
* If array-like, it will be converted to PNG base64 string and
embedded in the output.
origin : ['upper' | 'lower'], optional, default 'upper'
Place the [0, 0] index of the array in the upper left or
lower left corner of the axes.
colormap : callable, used only for `mono` image.
Function of the form [x -> (r,g,b)] or [x -> (r,g,b,a)]
for transforming a mono image into RGB.
It must output iterables of length 3 or 4, with values between
0. and 1. Hint : you can use colormaps from `matplotlib.cm`.
"""
if hasattr(image, 'read'):
# We got an image file.
if hasattr(image, 'name'):
# We try to get the image format from the file name.
fileformat = image.name.lower().split('.')[-1]
else:
fileformat = 'png'
url = 'data:image/{};base64,{}'.format(
fileformat, base64.b64encode(image.read()).decode('utf-8'))
elif (not (isinstance(image, text_type) or
isinstance(image, binary_type))) and hasattr(image, '__iter__'):
# We got an array-like object.
png = write_png(image, origin=origin, colormap=colormap)
url = 'data:image/png;base64,' + base64.b64encode(png).decode('utf-8')
else:
# We got an URL.
url = json.loads(json.dumps(image))
return url.replace('\n', ' ')
def write_png(data, origin='upper', colormap=None):
"""
Transform an array of data into a PNG string.
This can be written to disk using binary I/O, or encoded using base64
for an inline PNG like this:
>>> png_str = write_png(array)
>>> 'data:image/png;base64,'+png_str.encode('base64')
Inspired from
http://stackoverflow.com/questions/902761/saving-a-numpy-array-as-an-image
Parameters
----------
data: numpy array or equivalent list-like object.
Must be NxM (mono), NxMx3 (RGB) or NxMx4 (RGBA)
origin : ['upper' | 'lower'], optional, default 'upper'
Place the [0,0] index of the array in the upper left or lower left
corner of the axes.
colormap : callable, used only for `mono` image.
Function of the form [x -> (r,g,b)] or [x -> (r,g,b,a)]
for transforming a mono image into RGB.
It must output iterables of length 3 or 4, with values between
0. and 1. Hint: you can use colormaps from `matplotlib.cm`.
Returns
-------
PNG formatted byte string
"""
if np is None:
raise ImportError('The NumPy package is required'
' for this functionality')
if colormap is None:
def colormap(x):
return (x, x, x, 1)
array = np.atleast_3d(data)
height, width, nblayers = array.shape
if nblayers not in [1, 3, 4]:
raise ValueError('Data must be NxM (mono), '
'NxMx3 (RGB), or NxMx4 (RGBA)')
assert array.shape == (height, width, nblayers)
if nblayers == 1:
array = np.array(list(map(colormap, array.ravel())))
nblayers = array.shape[1]
if nblayers not in [3, 4]:
            raise ValueError('colormap must provide colors of '
                             'length 3 (RGB) or 4 (RGBA)')
array = array.reshape((height, width, nblayers))
assert array.shape == (height, width, nblayers)
if nblayers == 3:
array = np.concatenate((array, np.ones((height, width, 1))), axis=2)
nblayers = 4
assert array.shape == (height, width, nblayers)
assert nblayers == 4
# Normalize to uint8 if it isn't already.
if array.dtype != 'uint8':
array = array * 255./array.max(axis=(0, 1)).reshape((1, 1, 4))
array = array.astype('uint8')
# Eventually flip the image.
if origin == 'lower':
array = array[::-1, :, :]
# Transform the array to bytes.
raw_data = b''.join([b'\x00' + array[i, :, :].tobytes()
for i in range(height)])
def png_pack(png_tag, data):
chunk_head = png_tag + data
return (struct.pack('!I', len(data)) +
chunk_head +
struct.pack('!I', 0xFFFFFFFF & zlib.crc32(chunk_head)))
return b''.join([
b'\x89PNG\r\n\x1a\n',
png_pack(b'IHDR', struct.pack('!2I5B', width, height, 8, 6, 0, 0, 0)),
png_pack(b'IDAT', zlib.compress(raw_data, 9)),
png_pack(b'IEND', b'')])
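# Example (illustrative sketch): the returned bytes can be written to disk,
# e.g. with open('out.png', 'wb') as f: f.write(write_png(my_array)), or
# base64-encoded for inline embedding as shown in the docstring above.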
def _camelify(out):
return (''.join(['_' + x.lower() if i < len(out)-1 and x.isupper() and out[i+1].islower() # noqa
else x.lower() + '_' if i < len(out)-1 and x.islower() and out[i+1].isupper() # noqa
else x.lower() for i, x in enumerate(list(out))])).lstrip('_').replace('__', '_') # noqa
def _parse_size(value):
try:
if isinstance(value, int) or isinstance(value, float):
value_type = 'px'
value = float(value)
assert value > 0
else:
value_type = '%'
value = float(value.strip('%'))
assert 0 <= value <= 100
except Exception:
msg = 'Cannot parse value {!r} as {!r}'.format
raise ValueError(msg(value, value_type))
return value, value_type
def _locations_mirror(x):
"""Mirrors the points in a list-of-list-of-...-of-list-of-points.
For example:
>>> _locations_mirror([[[1, 2], [3, 4]], [5, 6], [7, 8]])
[[[2, 1], [4, 3]], [6, 5], [8, 7]]
"""
if hasattr(x, '__iter__'):
if hasattr(x[0], '__iter__'):
return list(map(_locations_mirror, x))
else:
return list(x[::-1])
else:
return x
def _locations_tolist(x):
"""Transforms recursively a list of iterables into a list of list.
"""
if hasattr(x, '__iter__'):
return list(map(_locations_tolist, x))
else:
return x
def none_min(x, y):
if x is None:
return y
elif y is None:
return x
else:
return min(x, y)
def none_max(x, y):
if x is None:
return y
elif y is None:
return x
else:
return max(x, y)
def iter_points(x):
"""Iterates over a list representing a feature, and returns a list of points,
whatever the shape of the array (Point, MultiPolyline, etc).
"""
if isinstance(x, (list, tuple)):
if len(x):
if isinstance(x[0], (list, tuple)):
out = []
for y in x:
out += iter_points(y)
return out
else:
return [x]
else:
return []
else:
raise ValueError('List/tuple type expected. Got {!r}.'.format(x))
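# Example (illustrative sketch): iter_points([0, 1]) -> [[0, 1]], while
# iter_points([[0, 1], [2, 3]]) and iter_points([[[0, 1], [2, 3]]]) both
# flatten to [[0, 1], [2, 3]].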
| mit |
Doctorhoenikker/dust | radprofile.py | 2 | 6067 |
import numpy as np
from astropy.io import fits
from astropy.io import ascii
import errors as err
## November 30, 2014 : Removed dependence on matplotlib and asciidata
## April 1, 2013 : Added copy function to Profile object
## March 29, 2013 : Updated minus, plus, divide, multiply with error propagating routine (errors.py)
## March 2, 2013 : Updated Profile object with minus, plus, divide, multiply
## Part of radprofile.sh script
## Taken from CygX-3/6601/primary
## Plots a profile when used './radprofile.py rp_filename'
## where the '.txt' extension is missing from rp_filename
import os # Needed for environment variables
import sys
sys.argv
#----------------------------------------------
## The Profile object
class Profile(object):
rleft = 0.0
rright = 0.0
surbri = 0.0
surbri_err = 0.0
@property
def rmid( self ):
return 0.5 * (self.rleft + self.rright)
@property
def area( self ):
return np.pi * (self.rright**2 - self.rleft**2) # pix^2
def __getslice__( self, i,j ):
result = Profile()
result.rleft = self.rleft[i:j]
result.rright = self.rright[i:j]
result.surbri = self.surbri[i:j]
result.surbri_err = self.surbri_err[i:j]
return result
def __getitem__( self, ivals ):
result = Profile()
result.rleft = self.rleft[ivals]
result.rright = self.rright[ivals]
result.surbri = self.surbri[ivals]
result.surbri_err = self.surbri_err[ivals]
return result
def minus( self, value, value_err=0 ):
oldsb = self.surbri
oldsb_err = self.surbri_err
self.surbri = oldsb - value
self.surbri_err = err.prop_add( oldsb_err, value_err )
#self.surbri_err = np.sqrt( oldsb_err**2 + value_err**2 )
return
def plus( self, value, value_err=0 ):
oldsb = self.surbri
oldsb_err = self.surbri_err
self.surbri = oldsb + value
self.surbri_err = err.prop_add( oldsb_err, value_err )
#self.surbri_err = np.sqrt( oldsb_err**2 + value_err**2 )
return
def divide( self, value, value_err=0 ):
oldsb = self.surbri
oldsb_err = self.surbri_err
self.surbri = oldsb / value
self.surbri_err = err.prop_div( oldsb, value, oldsb_err, value_err )
#self.surbri_err = oldsb_err*2 / value
return
def multiply( self, value, value_err=0 ):
oldsb = self.surbri
oldsb_err = self.surbri_err
self.surbri = oldsb * value
self.surbri_err = err.prop_mult( oldsb, value, oldsb_err, value_err )
#self.surbri_err = oldsb_err*2 * value
return
def write( self, filename, indices='all', sci_note=False ):
if indices == 'all':
indices = range(len(self.rmid))
FORMAT = "%f \t%f \t%f \t%f\n"
if sci_note:
FORMAT = "%e \t%e \t%e \t%e\n"
f = open(filename, 'w')
f.write( "# Bin_left\tBin_right\tSurbri\tSurbri_err\n" )
for i in indices:
f.write( FORMAT % \
(self.rleft[i], self.rright[i], self.surbri[i], self.surbri_err[i]) )
f.close()
return
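# Example (illustrative sketch; 'rp_source.txt' is a hypothetical file with
# the same four columns that Profile.write produces above):
# prof = get_profile('rp_source.txt')
# prof.minus(0.01, value_err=0.001)  # subtract a constant background level
# prof.write('rp_out.txt')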
#----------------------------------------------
## Useful functions
def copy_profile( profile ):
result = Profile()
result.rleft = np.array( profile.rleft )
result.rright = np.array( profile.rright )
result.surbri = np.array( profile.surbri )
result.surbri_err = np.array( profile.surbri_err )
return result
def get_profile_fits( filename, flux=False ):
result = Profile()
if flux:
sb_key = 'SUR_FLUX' # phot/cm^2/s/pix^2
sberr_key = 'SUR_FLUX_ERR'
else:
sb_key = 'SUR_BRI' # count/pix^2
sberr_key = 'SUR_BRI_ERR'
hdu_list = fits.open( filename )
data = hdu_list[1].data
result.rleft = data['R'][:,0]
result.rright = data['R'][:,1]
result.surbri = data[sb_key]
result.surbri_err = data[sberr_key]
return result
def get_profile( filename ):
result = Profile()
data = ascii.read( filename )
keys = data.keys()
result.rleft = data[keys[0]]
result.rright = data[keys[1]]
result.surbri = data[keys[2]]
result.surbri_err = data[keys[3]]
return result
def add_profile( profile1, profile2=Profile(), weight1=1.0, weight2=1.0 ):
result = Profile()
# if profile1.rleft != profile2.rleft or profile1.rright != profile2.rright:
# print 'Error: Profile bins need to match up'
# return
result.surbri = profile1.surbri * weight1 + profile2.surbri * weight2
result.surbri_err = np.sqrt( profile1.surbri_err**2 * weight1**2 + profile2.surbri_err**2 * weight2**2 )
result.rleft = profile1.rleft
result.rright = profile1.rright
return result
def make_bkg_profile( template, bkg_value, bkg_err=0.0 ):
result = Profile()
result.rleft = template.rleft
result.rright = template.rright
result.surbri = np.zeros( len(template.rleft) ) + bkg_value
result.surbri_err = np.zeros( len(template.rleft) ) + bkg_err
return result
#----------------------------------------------
## Added Feb 5, 2013 : More useful functions
def add_bkg( profile, bkg_counts, bkg_area ):
## To subtract, put a - sign before bkg_counts
bkg_surbri = bkg_counts / bkg_area
bkg_err = np.sqrt( bkg_counts ) / bkg_area
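    # Note: for a negative bkg_counts (i.e. subtraction, see the comment
    # above), np.sqrt(bkg_counts) evaluates to NaN; np.sqrt(abs(bkg_counts))
    # would keep the propagated error finite.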
sbnew = profile.surbri + bkg_surbri
sbnew_err = np.sqrt( profile.surbri_err**2 + bkg_err**2 )
profile.surbri = sbnew
profile.surbri_err = sbnew_err
return
def residual_profile( profile, model ):
result = Profile()
result.rleft = np.array(profile.rleft)
result.rright = np.array(profile.rright)
result.surbri = profile.surbri - model.surbri
result.surbri_err = np.sqrt( profile.surbri_err**2 + model.surbri_err**2 )
return result
#----------------------------------------------
try:
datafile = sys.argv[1]
profile = get_profile( datafile )
except:
pass
| bsd-2-clause |
adamgreenhall/scikit-learn | sklearn/tests/test_naive_bayes.py | 70 | 17509 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1:]), 2)
assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
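    # which is Laplace (add-one) smoothing:
    # P(x_i = 1 | y = c) = (N_ci + 1) / (N_c + 2)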
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliBN w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
| bsd-3-clause |
simon-pepin/scikit-learn | examples/cluster/plot_lena_compress.py | 271 | 2229 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image-processing example, Lena, an 8-bit grayscale
512 x 512 image, is used here to illustrate
how `k`-means is used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
n_clusters = 5
np.random.seed(0)
try:
lena = sp.lena()
except AttributeError:
# Newer versions of scipy have lena in misc
from scipy import misc
lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape
vmin = lena.min()
vmax = lena.max()
# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
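# Dashed lines: midpoints between the equal-width bin centers, for comparison
# with the k-means decision boundaries drawn just above.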
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
| bsd-3-clause |
andreh7/deap | deap/gp.py | 9 | 46662 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
"""The :mod:`gp` module provides the methods and classes to perform
Genetic Programming with DEAP. It essentially contains the classes to
build a Genetic Program Tree, and the functions to evaluate it.
This module supports both strongly and loosely typed GP.
"""
import copy
import math
import random
import re
import sys
import warnings
from collections import defaultdict, deque
from functools import partial, wraps
from inspect import isclass
from operator import eq, lt
import tools # Needed by HARM-GP
######################################
# GP Data structure #
######################################
# Define the name of type for any types.
__type__ = object
class PrimitiveTree(list):
"""Tree specifically formatted for optimization of genetic
programming operations. The tree is represented with a
list where the nodes are appended in a depth-first order.
The nodes appended to the tree are required to
have an attribute *arity* which defines the arity of the
    primitive. An arity of 0 is expected from terminal nodes.
"""
def __init__(self, content):
list.__init__(self, content)
def __deepcopy__(self, memo):
new = self.__class__(self)
new.__dict__.update(copy.deepcopy(self.__dict__, memo))
return new
def __setitem__(self, key, val):
# Check for most common errors
# Does NOT check for STGP constraints
if isinstance(key, slice):
if key.start >= len(self):
raise IndexError("Invalid slice object (try to assign a %s"
" in a tree of size %d). Even if this is allowed by the"
" list object slice setter, this should not be done in"
" the PrimitiveTree context, as this may lead to an"
" unpredictable behavior for searchSubtree or evaluate."
% (key, len(self)))
total = val[0].arity
for node in val[1:]:
total += node.arity - 1
if total != 0:
raise ValueError("Invalid slice assignation : insertion of"
" an incomplete subtree is not allowed in PrimitiveTree."
" A tree is defined as incomplete when some nodes cannot"
" be mapped to any position in the tree, considering the"
" primitives' arity. For instance, the tree [sub, 4, 5,"
" 6] is incomplete if the arity of sub is 2, because it"
" would produce an orphan node (the 6).")
elif val.arity != self[key].arity:
raise ValueError("Invalid node replacement with a node of a"
" different arity.")
list.__setitem__(self, key, val)
def __str__(self):
"""Return the expression in a human readable string.
"""
string = ""
stack = []
for node in self:
stack.append((node, []))
while len(stack[-1][1]) == stack[-1][0].arity:
prim, args = stack.pop()
string = prim.format(*args)
if len(stack) == 0:
break # If stack is empty, all nodes should have been seen
stack[-1][1].append(string)
return string
@classmethod
def from_string(cls, string, pset):
"""Try to convert a string expression into a PrimitiveTree given a
PrimitiveSet *pset*. The primitive set needs to contain every primitive
present in the expression.
:param string: String representation of a Python expression.
:param pset: Primitive set from which primitives are selected.
:returns: PrimitiveTree populated with the deserialized primitives.
"""
tokens = re.split("[ \t\n\r\f\v(),]", string)
expr = []
ret_types = deque()
for token in tokens:
if token == '':
continue
if len(ret_types) != 0:
type_ = ret_types.popleft()
else:
type_ = None
if token in pset.mapping:
primitive = pset.mapping[token]
if type_ is not None and not issubclass(primitive.ret, type_):
raise TypeError("Primitive {} return type {} does not "
"match the expected one: {}."
.format(primitive, primitive.ret, type_))
expr.append(primitive)
if isinstance(primitive, Primitive):
ret_types.extendleft(reversed(primitive.args))
else:
try:
token = eval(token)
except NameError:
raise TypeError("Unable to evaluate terminal: {}.".format(token))
if type_ is None:
type_ = type(token)
if not issubclass(type(token), type_):
raise TypeError("Terminal {} type {} does not "
"match the expected one: {}."
.format(token, type(token), type_))
expr.append(Terminal(token, False, type_))
return cls(expr)
@property
def height(self):
"""Return the height of the tree, or the depth of the
deepest node.
"""
stack = [0]
max_depth = 0
for elem in self:
depth = stack.pop()
max_depth = max(max_depth, depth)
stack.extend([depth + 1] * elem.arity)
return max_depth
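    # Illustrative sketch: for the prefix-encoded tree [mul, add, x, y, 2]
    # (mul and add of arity 2, the rest terminals), height == 2.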
@property
def root(self):
"""Root of the tree, the element 0 of the list.
"""
return self[0]
def searchSubtree(self, begin):
"""Return a slice object that corresponds to the
range of values that defines the subtree which has the
element with index *begin* as its root.
"""
end = begin + 1
total = self[begin].arity
while total > 0:
total += self[end].arity - 1
end += 1
return slice(begin, end)
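    # Illustrative sketch: in the same example tree [mul, add, x, y, 2],
    # searchSubtree(1) returns slice(1, 4), i.e. the add(x, y) subtree.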
class Primitive(object):
"""Class that encapsulates a primitive and when called with arguments it
returns the Python code to call the primitive with the arguments.
>>> pr = Primitive("mul", (int, int), int)
>>> pr.format(1, 2)
'mul(1, 2)'
"""
__slots__ = ('name', 'arity', 'args', 'ret', 'seq')
def __init__(self, name, args, ret):
self.name = name
self.arity = len(args)
self.args = args
self.ret = ret
args = ", ".join(map("{{{0}}}".format, range(self.arity)))
self.seq = "{name}({args})".format(name=self.name, args=args)
def format(self, *args):
return self.seq.format(*args)
def __eq__(self, other):
if type(self) is type(other):
return all(getattr(self, slot) == getattr(other, slot)
for slot in self.__slots__)
else:
return NotImplemented
class Terminal(object):
"""Class that encapsulates terminal primitive in expression. Terminals can
be values or 0-arity functions.
"""
__slots__ = ('name', 'value', 'ret', 'conv_fct')
def __init__(self, terminal, symbolic, ret):
self.ret = ret
self.value = terminal
self.name = str(terminal)
self.conv_fct = str if symbolic else repr
@property
def arity(self):
return 0
def format(self):
return self.conv_fct(self.value)
def __eq__(self, other):
if type(self) is type(other):
return all(getattr(self, slot) == getattr(other, slot)
for slot in self.__slots__)
else:
return NotImplemented
class Ephemeral(Terminal):
"""Class that encapsulates a terminal which value is set when the
object is created. To mutate the value, a new object has to be
generated. This is an abstract base class. When subclassing, a
staticmethod 'func' must be defined.
"""
def __init__(self):
Terminal.__init__(self, self.func(), symbolic=False, ret=self.ret)
@staticmethod
def func():
"""Return a random value used to define the ephemeral state.
"""
raise NotImplementedError
class PrimitiveSetTyped(object):
"""Class that contains the primitives that can be used to solve a
    Strongly Typed GP problem. The set also defines the return type of the
    evolved function, and the type and number of its input arguments.
"""
def __init__(self, name, in_types, ret_type, prefix="ARG"):
self.terminals = defaultdict(list)
self.primitives = defaultdict(list)
self.arguments = []
# setting "__builtins__" to None avoid the context
# being polluted by builtins function when evaluating
# GP expression.
self.context = {"__builtins__": None}
self.mapping = dict()
self.terms_count = 0
self.prims_count = 0
self.name = name
self.ret = ret_type
self.ins = in_types
for i, type_ in enumerate(in_types):
arg_str = "{prefix}{index}".format(prefix=prefix, index=i)
self.arguments.append(arg_str)
term = Terminal(arg_str, True, type_)
self._add(term)
self.terms_count += 1
def renameArguments(self, **kargs):
"""Rename function arguments with new names from *kargs*.
"""
for i, old_name in enumerate(self.arguments):
if old_name in kargs:
new_name = kargs[old_name]
self.arguments[i] = new_name
self.mapping[new_name] = self.mapping[old_name]
self.mapping[new_name].value = new_name
del self.mapping[old_name]
def _add(self, prim):
def addType(dict_, ret_type):
if not ret_type in dict_:
new_list = []
for type_, list_ in dict_.items():
if issubclass(type_, ret_type):
for item in list_:
if not item in new_list:
new_list.append(item)
dict_[ret_type] = new_list
addType(self.primitives, prim.ret)
addType(self.terminals, prim.ret)
self.mapping[prim.name] = prim
if isinstance(prim, Primitive):
for type_ in prim.args:
addType(self.primitives, type_)
addType(self.terminals, type_)
dict_ = self.primitives
else:
dict_ = self.terminals
for type_ in dict_:
if issubclass(prim.ret, type_):
dict_[type_].append(prim)
def addPrimitive(self, primitive, in_types, ret_type, name=None):
"""Add a primitive to the set.
:param primitive: callable object or a function.
        :param in_types: list of the primitive's argument types.
:param ret_type: type returned by the primitive.
:param name: alternative name for the primitive instead
of its __name__ attribute.
"""
if name is None:
name = primitive.__name__
prim = Primitive(name, in_types, ret_type)
assert name not in self.context or \
self.context[name] is primitive, \
"Primitives are required to have a unique name. " \
"Consider using the argument 'name' to rename your "\
"second '%s' primitive." % (name,)
self._add(prim)
self.context[prim.name] = primitive
self.prims_count += 1
def addTerminal(self, terminal, ret_type, name=None):
"""Add a terminal to the set. Terminals can be named
using the optional *name* argument. This should be
        used: to define a named constant (e.g. pi); to speed up
        evaluation when the object is expensive to build; when
        the object does not have a __repr__ function that returns
        the code to build the object; or when the object class is
        not a Python built-in.
:param terminal: Object, or a function with no arguments.
:param ret_type: Type of the terminal.
:param name: defines the name of the terminal in the expression.
"""
symbolic = False
if name is None and callable(terminal):
name = terminal.__name__
assert name not in self.context, \
"Terminals are required to have a unique name. " \
"Consider using the argument 'name' to rename your "\
"second %s terminal." % (name,)
if name is not None:
self.context[name] = terminal
terminal = name
symbolic = True
elif terminal in (True, False):
# To support True and False terminals with Python 2.
self.context[str(terminal)] = terminal
prim = Terminal(terminal, symbolic, ret_type)
self._add(prim)
self.terms_count += 1
def addEphemeralConstant(self, name, ephemeral, ret_type):
"""Add an ephemeral constant to the set. An ephemeral constant
is a no argument function that returns a random value. The value
of the constant is constant for a Tree, but may differ from one
Tree to another.
:param name: name used to refers to this ephemeral type.
:param ephemeral: function with no arguments returning a random value.
:param ret_type: type of the object returned by *ephemeral*.
"""
module_gp = globals()
if not name in module_gp:
class_ = type(name, (Ephemeral,), {'func': staticmethod(ephemeral),
'ret': ret_type})
module_gp[name] = class_
else:
class_ = module_gp[name]
if issubclass(class_, Ephemeral):
if class_.func is not ephemeral:
raise Exception("Ephemerals with different functions should "
"be named differently, even between psets.")
elif class_.ret is not ret_type:
raise Exception("Ephemerals with the same name and function "
"should have the same type, even between psets.")
else:
raise Exception("Ephemerals should be named differently "
"than classes defined in the gp module.")
self._add(class_)
self.terms_count += 1
def addADF(self, adfset):
"""Add an Automatically Defined Function (ADF) to the set.
:param adfset: PrimitiveSetTyped containing the primitives with which
the ADF can be built.
"""
prim = Primitive(adfset.name, adfset.ins, adfset.ret)
self._add(prim)
self.prims_count += 1
@property
def terminalRatio(self):
"""Return the ratio of the number of terminals on the number of all
kind of primitives.
"""
return self.terms_count / float(self.terms_count + self.prims_count)
class PrimitiveSet(PrimitiveSetTyped):
"""Class same as :class:`~deap.gp.PrimitiveSetTyped`, except there is no
definition of type.
"""
def __init__(self, name, arity, prefix="ARG"):
args = [__type__] * arity
PrimitiveSetTyped.__init__(self, name, args, __type__, prefix)
def addPrimitive(self, primitive, arity, name=None):
"""Add primitive *primitive* with arity *arity* to the set.
        If a name *name* is provided, it will replace the __name__
        attribute used to represent/identify the primitive.
"""
assert arity > 0, "arity should be >= 1"
args = [__type__] * arity
PrimitiveSetTyped.addPrimitive(self, primitive, args, __type__, name)
def addTerminal(self, terminal, name=None):
"""Add a terminal to the set."""
PrimitiveSetTyped.addTerminal(self, terminal, __type__, name)
def addEphemeralConstant(self, name, ephemeral):
"""Add an ephemeral constant to the set."""
PrimitiveSetTyped.addEphemeralConstant(self, name, ephemeral, __type__)
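# Editor's note: an illustrative sketch, not part of the original DEAP module.
# It shows a typical way to build an untyped primitive set with the classes
# above; operator.add/operator.neg, the constant 3 and the "rand101" ephemeral
# name are assumptions picked only for the demonstration.
def _example_build_pset():
    import operator
    import random
    pset = PrimitiveSet("MAIN", arity=1)      # one input, exposed as ARG0
    pset.addPrimitive(operator.add, 2)
    pset.addPrimitive(operator.neg, 1)
    pset.addTerminal(3)
    # addEphemeralConstant registers a module-level class, so the chosen name
    # must be unique within the process.
    pset.addEphemeralConstant("rand101", lambda: random.randint(-1, 1))
    pset.renameArguments(ARG0="x")
    return pset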
######################################
# GP Tree compilation functions #
######################################
def compile(expr, pset):
"""Compile the expression *expr*.
:param expr: Expression to compile. It can either be a PrimitiveTree,
a string of Python code or any object that when
converted into string produced a valid Python code
expression.
:param pset: Primitive set against which the expression is compile.
:returns: a function if the primitive set has 1 or more arguments,
or return the results produced by evaluating the tree.
"""
code = str(expr)
if len(pset.arguments) > 0:
# This section is a stripped version of the lambdify
# function of SymPy 0.6.6.
args = ",".join(arg for arg in pset.arguments)
code = "lambda {args}: {code}".format(args=args, code=code)
try:
return eval(code, pset.context, {})
except MemoryError:
_, _, traceback = sys.exc_info()
raise MemoryError, ("DEAP : Error in tree evaluation :"
" Python cannot evaluate a tree higher than 90. "
"To avoid this problem, you should use bloat control on your "
"operators. See the DEAP documentation for more information. "
"DEAP will now abort."), traceback
def compileADF(expr, psets):
"""Compile the expression represented by a list of trees. The first
element of the list is the main tree, and the following elements are
automatically defined functions (ADF) that can be called by the first
tree.
:param expr: Expression to compile. It can either be a PrimitiveTree,
a string of Python code or any object that when
converted into string produced a valid Python code
expression.
:param psets: List of primitive sets. Each set corresponds to an ADF
while the last set is associated with the expression
and should contain reference to the preceding ADFs.
:returns: a function if the main primitive set has 1 or more arguments,
or return the results produced by evaluating the tree.
"""
adfdict = {}
func = None
for pset, subexpr in reversed(zip(psets, expr)):
pset.context.update(adfdict)
func = compile(subexpr, pset)
adfdict.update({pset.name: func})
return func
######################################
# GP Program generation functions #
######################################
def genFull(pset, min_, max_, type_=None):
"""Generate an expression where each leaf has a the same depth
between *min* and *max*.
:param pset: Primitive set from which primitives are selected.
:param min_: Minimum height of the produced trees.
:param max_: Maximum Height of the produced trees.
    :param type_: The type the tree should return when called. When
                  :obj:`None` (default), the return type of *pset*
                  (pset.ret) is assumed.
:returns: A full tree with all leaves at the same depth.
"""
def condition(height, depth):
"""Expression generation stops when the depth is equal to height."""
return depth == height
return generate(pset, min_, max_, condition, type_)
def genGrow(pset, min_, max_, type_=None):
"""Generate an expression where each leaf might have a different depth
between *min* and *max*.
:param pset: Primitive set from which primitives are selected.
:param min_: Minimum height of the produced trees.
:param max_: Maximum Height of the produced trees.
    :param type_: The type the tree should return when called. When
                  :obj:`None` (default), the return type of *pset*
                  (pset.ret) is assumed.
:returns: A grown tree with leaves at possibly different depths.
"""
def condition(height, depth):
"""Expression generation stops when the depth is equal to height
        or when it is randomly determined that a node should be a terminal.
"""
return depth == height or \
(depth >= min_ and random.random() < pset.terminalRatio)
return generate(pset, min_, max_, condition, type_)
def genHalfAndHalf(pset, min_, max_, type_=None):
"""Generate an expression with a PrimitiveSet *pset*.
Half the time, the expression is generated with :func:`~deap.gp.genGrow`,
the other half, the expression is generated with :func:`~deap.gp.genFull`.
:param pset: Primitive set from which primitives are selected.
:param min_: Minimum height of the produced trees.
:param max_: Maximum Height of the produced trees.
    :param type_: The type the tree should return when called. When
                  :obj:`None` (default), the return type of *pset*
                  (pset.ret) is assumed.
:returns: Either, a full or a grown tree.
"""
method = random.choice((genGrow, genFull))
return method(pset, min_, max_, type_)
def genRamped(pset, min_, max_, type_=None):
"""
.. deprecated:: 1.0
The function has been renamed. Use :func:`~deap.gp.genHalfAndHalf` instead.
"""
warnings.warn("gp.genRamped has been renamed. Use genHalfAndHalf instead.",
FutureWarning)
return genHalfAndHalf(pset, min_, max_, type_)
def generate(pset, min_, max_, condition, type_=None):
"""Generate a Tree as a list of list. The tree is build
from the root to the leaves, and it stop growing when the
condition is fulfilled.
:param pset: Primitive set from which primitives are selected.
:param min_: Minimum height of the produced trees.
:param max_: Maximum Height of the produced trees.
:param condition: The condition is a function that takes two arguments,
the height of the tree to build and the current
depth in the tree.
    :param type_: The type the tree should return when called. When
                  :obj:`None` (default), the return type of *pset*
                  (pset.ret) is assumed.
:returns: A grown tree with leaves at possibly different depths
              depending on the condition function.
"""
if type_ is None:
type_ = pset.ret
expr = []
height = random.randint(min_, max_)
stack = [(0, type_)]
while len(stack) != 0:
depth, type_ = stack.pop()
if condition(height, depth):
try:
term = random.choice(pset.terminals[type_])
except IndexError:
_, _, traceback = sys.exc_info()
raise IndexError, "The gp.generate function tried to add "\
"a terminal of type '%s', but there is "\
"none available." % (type_,), traceback
if isclass(term):
term = term()
expr.append(term)
else:
try:
prim = random.choice(pset.primitives[type_])
except IndexError:
_, _, traceback = sys.exc_info()
raise IndexError, "The gp.generate function tried to add "\
"a primitive of type '%s', but there is "\
"none available." % (type_,), traceback
expr.append(prim)
for arg in reversed(prim.args):
stack.append((depth + 1, arg))
return expr
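# Editor's note: an illustrative sketch, not part of the original DEAP module.
# With the condition functions above, genFull produces trees whose leaves all
# sit at the same depth, while genGrow may stop a branch early. The two-
# primitive set used here is an assumption for the demonstration.
def _example_generate():
    import operator
    pset = PrimitiveSet("GEN", arity=1)
    pset.addPrimitive(operator.add, 2)
    pset.addPrimitive(operator.sub, 2)
    full_tree = PrimitiveTree(genFull(pset, min_=2, max_=2))
    grown_tree = PrimitiveTree(genGrow(pset, min_=1, max_=3))
    return full_tree.height, grown_tree.height   # the first is always exactly 2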
######################################
# GP Crossovers #
######################################
def cxOnePoint(ind1, ind2):
"""Randomly select in each individual and exchange each subtree with the
point as root between each individual.
:param ind1: First tree participating in the crossover.
:param ind2: Second tree participating in the crossover.
:returns: A tuple of two trees.
"""
if len(ind1) < 2 or len(ind2) < 2:
# No crossover on single node tree
return ind1, ind2
# List all available primitive types in each individual
types1 = defaultdict(list)
types2 = defaultdict(list)
if ind1.root.ret == __type__:
# Not STGP optimization
types1[__type__] = xrange(1, len(ind1))
types2[__type__] = xrange(1, len(ind2))
common_types = [__type__]
else:
for idx, node in enumerate(ind1[1:], 1):
types1[node.ret].append(idx)
for idx, node in enumerate(ind2[1:], 1):
types2[node.ret].append(idx)
common_types = set(types1.keys()).intersection(set(types2.keys()))
if len(common_types) > 0:
type_ = random.choice(list(common_types))
index1 = random.choice(types1[type_])
index2 = random.choice(types2[type_])
slice1 = ind1.searchSubtree(index1)
slice2 = ind2.searchSubtree(index2)
ind1[slice1], ind2[slice2] = ind2[slice2], ind1[slice1]
return ind1, ind2
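# Editor's note: an illustrative sketch, not part of the original DEAP module.
# cxOnePoint modifies both parents in place and returns them; the primitive set
# below is an assumption used only for the demonstration.
def _example_crossover():
    import operator
    pset = PrimitiveSet("CX", arity=1)
    pset.addPrimitive(operator.add, 2)
    pset.addPrimitive(operator.mul, 2)
    ind1 = PrimitiveTree(genFull(pset, min_=2, max_=2))
    ind2 = PrimitiveTree(genFull(pset, min_=2, max_=2))
    child1, child2 = cxOnePoint(ind1, ind2)   # subtrees swapped between parents
    return child1, child2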
def cxOnePointLeafBiased(ind1, ind2, termpb):
"""Randomly select crossover point in each individual and exchange each
subtree with the point as root between each individual.
:param ind1: First typed tree participating in the crossover.
:param ind2: Second typed tree participating in the crossover.
    :param termpb: The probability of choosing a terminal node (leaf).
:returns: A tuple of two typed trees.
When the nodes are strongly typed, the operator makes sure the
second node type corresponds to the first node type.
The parameter *termpb* sets the probability to choose between a terminal
or non-terminal crossover point. For instance, as defined by Koza, non-
terminal primitives are selected for 90% of the crossover points, and
terminals for 10%, so *termpb* should be set to 0.1.
"""
if len(ind1) < 2 or len(ind2) < 2:
# No crossover on single node tree
return ind1, ind2
    # Determine whether we keep terminals or primitives for each individual
terminal_op = partial(eq, 0)
primitive_op = partial(lt, 0)
arity_op1 = terminal_op if random.random() < termpb else primitive_op
arity_op2 = terminal_op if random.random() < termpb else primitive_op
# List all available primitive or terminal types in each individual
types1 = defaultdict(list)
types2 = defaultdict(list)
for idx, node in enumerate(ind1[1:], 1):
if arity_op1(node.arity):
types1[node.ret].append(idx)
for idx, node in enumerate(ind2[1:], 1):
if arity_op2(node.arity):
types2[node.ret].append(idx)
common_types = set(types1.keys()).intersection(set(types2.keys()))
if len(common_types) > 0:
# Set does not support indexing
type_ = random.sample(common_types, 1)[0]
index1 = random.choice(types1[type_])
index2 = random.choice(types2[type_])
slice1 = ind1.searchSubtree(index1)
slice2 = ind2.searchSubtree(index2)
ind1[slice1], ind2[slice2] = ind2[slice2], ind1[slice1]
return ind1, ind2
######################################
# GP Mutations #
######################################
def mutUniform(individual, expr, pset):
"""Randomly select a point in the tree *individual*, then replace the
    subtree rooted at that point by an expression generated with the given
    :func:`expr` function.
:param individual: The tree to be mutated.
:param expr: A function object that can generate an expression when
called.
:returns: A tuple of one tree.
"""
index = random.randrange(len(individual))
slice_ = individual.searchSubtree(index)
type_ = individual[index].ret
individual[slice_] = expr(pset=pset, type_=type_)
return individual,
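# Editor's note: an illustrative sketch, not part of the original DEAP module.
# mutUniform expects an ``expr`` callable accepting ``pset`` and ``type_``
# keyword arguments; a partially applied genGrow (an assumption mirroring
# common DEAP usage) satisfies that contract.
def _example_mutation():
    import operator
    pset = PrimitiveSet("MUT", arity=1)
    pset.addPrimitive(operator.add, 2)
    individual = PrimitiveTree(genFull(pset, min_=2, max_=2))
    expr = partial(genGrow, min_=0, max_=2)
    mutant, = mutUniform(individual, expr, pset)   # a one-element tuple
    return mutant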
def mutNodeReplacement(individual, pset):
"""Replaces a randomly chosen primitive from *individual* by a randomly
    chosen primitive with the same number of arguments, drawn from the given
    *pset*.
:param individual: The normal or typed tree to be mutated.
:returns: A tuple of one tree.
"""
if len(individual) < 2:
return individual,
index = random.randrange(1, len(individual))
node = individual[index]
if node.arity == 0: # Terminal
term = random.choice(pset.terminals[node.ret])
if isclass(term):
term = term()
individual[index] = term
else: # Primitive
prims = [p for p in pset.primitives[node.ret] if p.args == node.args]
individual[index] = random.choice(prims)
return individual,
def mutEphemeral(individual, mode):
"""This operator works on the constants of the tree *individual*. In
*mode* ``"one"``, it will change the value of one of the individual
ephemeral constants by calling its generator function. In *mode*
``"all"``, it will change the value of **all** the ephemeral constants.
:param individual: The normal or typed tree to be mutated.
:param mode: A string to indicate to change ``"one"`` or ``"all"``
ephemeral constants.
:returns: A tuple of one tree.
"""
if mode not in ["one", "all"]:
raise ValueError("Mode must be one of \"one\" or \"all\"")
ephemerals_idx = [index
for index, node in enumerate(individual)
if isinstance(node, Ephemeral)]
if len(ephemerals_idx) > 0:
if mode == "one":
ephemerals_idx = (random.choice(ephemerals_idx),)
for i in ephemerals_idx:
individual[i] = type(individual[i])()
return individual,
def mutInsert(individual, pset):
"""Inserts a new branch at a random position in *individual*. The subtree
    at the chosen position is used as a child node of the created subtree; in
    that way, it is really an insertion rather than a replacement. Note that
    the original subtree will become one of the children of the new primitive
    inserted, but not necessarily the first (its position is randomly selected
    if the new primitive has more than one child).
:param individual: The normal or typed tree to be mutated.
:returns: A tuple of one tree.
"""
index = random.randrange(len(individual))
node = individual[index]
slice_ = individual.searchSubtree(index)
choice = random.choice
    # As we want to keep the current node as a child of the new one,
# it must accept the return value of the current node
primitives = [p for p in pset.primitives[node.ret] if node.ret in p.args]
if len(primitives) == 0:
return individual,
new_node = choice(primitives)
new_subtree = [None] * len(new_node.args)
position = choice([i for i, a in enumerate(new_node.args) if a == node.ret])
for i, arg_type in enumerate(new_node.args):
if i != position:
term = choice(pset.terminals[arg_type])
if isclass(term):
term = term()
new_subtree[i] = term
new_subtree[position:position + 1] = individual[slice_]
new_subtree.insert(0, new_node)
individual[slice_] = new_subtree
return individual,
def mutShrink(individual):
"""This operator shrinks the *individual* by chosing randomly a branch and
replacing it with one of the branch's arguments (also randomly chosen).
:param individual: The tree to be shrinked.
:returns: A tuple of one tree.
"""
# We don't want to "shrink" the root
if len(individual) < 3 or individual.height <= 1:
return individual,
iprims = []
for i, node in enumerate(individual[1:], 1):
if isinstance(node, Primitive) and node.ret in node.args:
iprims.append((i, node))
if len(iprims) != 0:
index, prim = random.choice(iprims)
arg_idx = random.choice([i for i, type_ in enumerate(prim.args) if type_ == prim.ret])
rindex = index + 1
for _ in range(arg_idx + 1):
rslice = individual.searchSubtree(rindex)
subtree = individual[rslice]
rindex += len(subtree)
slice_ = individual.searchSubtree(index)
individual[slice_] = subtree
return individual,
######################################
# GP bloat control decorators #
######################################
def staticLimit(key, max_value):
"""Implement a static limit on some measurement on a GP tree, as defined
by Koza in [Koza1989]. It may be used to decorate both crossover and
mutation operators. When an invalid (over the limit) child is generated,
it is simply replaced by one of its parents, randomly selected.
    This operator can be used to avoid memory errors occurring when the tree
gets higher than 90 levels (as Python puts a limit on the call stack
depth), because it can ensure that no tree higher than this limit will ever
be accepted in the population, except if it was generated at initialization
time.
    :param key: The function to use in order to get the wanted value. For
instance, on a GP tree, ``operator.attrgetter('height')`` may
be used to set a depth limit, and ``len`` to set a size limit.
:param max_value: The maximum value allowed for the given measurement.
:returns: A decorator that can be applied to a GP operator using \
:func:`~deap.base.Toolbox.decorate`
.. note::
If you want to reproduce the exact behavior intended by Koza, set
*key* to ``operator.attrgetter('height')`` and *max_value* to 17.
.. [Koza1989] J.R. Koza, Genetic Programming - On the Programming of
Computers by Means of Natural Selection (MIT Press,
Cambridge, MA, 1992)
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
keep_inds = [copy.deepcopy(ind) for ind in args]
new_inds = list(func(*args, **kwargs))
for i, ind in enumerate(new_inds):
if key(ind) > max_value:
new_inds[i] = random.choice(keep_inds)
return new_inds
return wrapper
return decorator
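# Editor's note: an illustrative sketch, not part of the original DEAP module.
# staticLimit is usually attached through deap.base.Toolbox.decorate (as the
# docstring above suggests); here it is applied directly to mutUniform. The
# primitive set and the height limit of 17 are assumptions for the example.
def _example_static_limit():
    import operator
    pset = PrimitiveSet("LIM", arity=1)
    pset.addPrimitive(operator.add, 2)
    limited_mutate = staticLimit(key=operator.attrgetter("height"),
                                 max_value=17)(mutUniform)
    individual = PrimitiveTree(genFull(pset, min_=2, max_=2))
    expr = partial(genGrow, min_=0, max_=2)
    # Keyword arguments avoid deep-copying expr/pset; an over-tall mutant would
    # be replaced by the deep-copied parent.
    mutant, = limited_mutate(individual, expr=expr, pset=pset)
    return mutant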
######################################
# GP bloat control algorithms #
######################################
def harm(population, toolbox, cxpb, mutpb, ngen,
alpha, beta, gamma, rho, nbrindsmodel=-1, mincutoff=20,
stats=None, halloffame=None, verbose=__debug__):
"""Implement bloat control on a GP evolution using HARM-GP, as defined in
[Gardner2015]. It is implemented in the form of an evolution algorithm
(similar to :func:`~deap.algorithms.eaSimple`).
:param population: A list of individuals.
:param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
operators.
:param cxpb: The probability of mating two individuals.
:param mutpb: The probability of mutating an individual.
:param ngen: The number of generation.
:param alpha: The HARM *alpha* parameter.
:param beta: The HARM *beta* parameter.
:param gamma: The HARM *gamma* parameter.
:param rho: The HARM *rho* parameter.
:param nbrindsmodel: The number of individuals to generate in order to
model the natural distribution. -1 is a special
value which uses the equation proposed in
[Gardner2015] to set the value of this parameter :
max(2000, len(population))
:param mincutoff: The absolute minimum value for the cutoff point. It is
used to ensure that HARM does not shrink the population
too much at the beginning of the evolution. The default
value is usually fine.
:param stats: A :class:`~deap.tools.Statistics` object that is updated
inplace, optional.
:param halloffame: A :class:`~deap.tools.HallOfFame` object that will
contain the best individuals, optional.
:param verbose: Whether or not to log the statistics.
:returns: The final population
:returns: A class:`~deap.tools.Logbook` with the statistics of the
evolution
This function expects the :meth:`toolbox.mate`, :meth:`toolbox.mutate`,
:meth:`toolbox.select` and :meth:`toolbox.evaluate` aliases to be
registered in the toolbox.
.. note::
The recommended values for the HARM-GP parameters are *alpha=0.05*,
*beta=10*, *gamma=0.25*, *rho=0.9*. However, these parameters can be
adjusted to perform better on a specific problem (see the relevant
paper for tuning information). The number of individuals used to
model the natural distribution and the minimum cutoff point are less
important, their default value being effective in most cases.
.. [Gardner2015] M.-A. Gardner, C. Gagne, and M. Parizeau, Controlling
Code Growth by Dynamically Shaping the Genotype Size Distribution,
Genetic Programming and Evolvable Machines, 2015,
DOI 10.1007/s10710-015-9242-8
"""
def _genpop(n, pickfrom=[], acceptfunc=lambda s: True, producesizes=False):
# Generate a population of n individuals, using individuals in
        # *pickfrom* if possible, with an *acceptfunc* acceptance function.
# If *producesizes* is true, also return a list of the produced
# individuals sizes.
# This function is used 1) to generate the natural distribution
        # (in this case, pickfrom and acceptfunc should be left at their
# default values) and 2) to generate the final population, in which
# case pickfrom should be the natural population previously generated
# and acceptfunc a function implementing the HARM-GP algorithm.
producedpop = []
producedpopsizes = []
while len(producedpop) < n:
if len(pickfrom) > 0:
# If possible, use the already generated
# individuals (more efficient)
aspirant = pickfrom.pop()
if acceptfunc(len(aspirant)):
producedpop.append(aspirant)
if producesizes:
producedpopsizes.append(len(aspirant))
else:
opRandom = random.random()
if opRandom < cxpb:
# Crossover
aspirant1, aspirant2 = toolbox.mate(*map(toolbox.clone,
toolbox.select(population, 2)))
del aspirant1.fitness.values, aspirant2.fitness.values
if acceptfunc(len(aspirant1)):
producedpop.append(aspirant1)
if producesizes:
producedpopsizes.append(len(aspirant1))
if len(producedpop) < n and acceptfunc(len(aspirant2)):
producedpop.append(aspirant2)
if producesizes:
producedpopsizes.append(len(aspirant2))
else:
aspirant = toolbox.clone(toolbox.select(population, 1)[0])
if opRandom - cxpb < mutpb:
# Mutation
aspirant = toolbox.mutate(aspirant)[0]
del aspirant.fitness.values
if acceptfunc(len(aspirant)):
producedpop.append(aspirant)
if producesizes:
producedpopsizes.append(len(aspirant))
if producesizes:
return producedpop, producedpopsizes
else:
return producedpop
halflifefunc = lambda x: (x * float(alpha) + beta)
if nbrindsmodel == -1:
nbrindsmodel = max(2000, len(population))
logbook = tools.Logbook()
logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in population if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
if halloffame is not None:
halloffame.update(population)
record = stats.compile(population) if stats else {}
logbook.record(gen=0, nevals=len(invalid_ind), **record)
if verbose:
print logbook.stream
# Begin the generational process
for gen in range(1, ngen + 1):
# Estimation population natural distribution of sizes
naturalpop, naturalpopsizes = _genpop(nbrindsmodel, producesizes=True)
naturalhist = [0] * (max(naturalpopsizes) + 3)
for indsize in naturalpopsizes:
# Kernel density estimation application
naturalhist[indsize] += 0.4
naturalhist[indsize - 1] += 0.2
naturalhist[indsize + 1] += 0.2
naturalhist[indsize + 2] += 0.1
if indsize - 2 >= 0:
naturalhist[indsize - 2] += 0.1
# Normalization
naturalhist = [val * len(population) / nbrindsmodel for val in naturalhist]
# Cutoff point selection
sortednatural = sorted(naturalpop, key=lambda ind: ind.fitness)
cutoffcandidates = sortednatural[int(len(population) * rho - 1):]
# Select the cutoff point, with an absolute minimum applied
# to avoid weird cases in the first generations
cutoffsize = max(mincutoff, len(min(cutoffcandidates, key=len)))
# Compute the target distribution
targetfunc = lambda x: (gamma * len(population) * math.log(2) /
halflifefunc(x)) * math.exp(-math.log(2) *
(x - cutoffsize) / halflifefunc(x))
targethist = [naturalhist[binidx] if binidx <= cutoffsize else
targetfunc(binidx) for binidx in range(len(naturalhist))]
# Compute the probabilities distribution
probhist = [t / n if n > 0 else t for n, t in zip(naturalhist, targethist)]
probfunc = lambda s: probhist[s] if s < len(probhist) else targetfunc(s)
acceptfunc = lambda s: random.random() <= probfunc(s)
# Generate offspring using the acceptance probabilities
# previously computed
offspring = _genpop(len(population), pickfrom=naturalpop,
acceptfunc=acceptfunc, producesizes=False)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Update the hall of fame with the generated individuals
if halloffame is not None:
halloffame.update(offspring)
# Replace the current population by the offspring
population[:] = offspring
# Append the current generation statistics to the logbook
record = stats.compile(population) if stats else {}
logbook.record(gen=gen, nevals=len(invalid_ind), **record)
if verbose:
print logbook.stream
return population, logbook
def graph(expr):
"""Construct the graph of a tree expression. The tree expression must be
valid. It returns in order a node list, an edge list, and a dictionary of
the per node labels. The node are represented by numbers, the edges are
tuples connecting two nodes (number), and the labels are values of a
dictionary for which keys are the node numbers.
:param expr: A tree expression to convert into a graph.
:returns: A node list, an edge list, and a dictionary of labels.
The returned objects can be used directly to populate a
`pygraphviz <http://networkx.lanl.gov/pygraphviz/>`_ graph::
import pygraphviz as pgv
# [...] Execution of code that produce a tree expression
nodes, edges, labels = graph(expr)
g = pgv.AGraph()
g.add_nodes_from(nodes)
g.add_edges_from(edges)
g.layout(prog="dot")
for i in nodes:
n = g.get_node(i)
n.attr["label"] = labels[i]
g.draw("tree.pdf")
    or a `NetworkX <http://networkx.github.com/>`_ graph::
import matplotlib.pyplot as plt
import networkx as nx
# [...] Execution of code that produce a tree expression
nodes, edges, labels = graph(expr)
g = nx.Graph()
g.add_nodes_from(nodes)
g.add_edges_from(edges)
pos = nx.graphviz_layout(g, prog="dot")
nx.draw_networkx_nodes(g, pos)
nx.draw_networkx_edges(g, pos)
nx.draw_networkx_labels(g, pos, labels)
plt.show()
.. note::
We encourage you to use `pygraphviz
<http://networkx.lanl.gov/pygraphviz/>`_ as the nodes might be plotted
        out of order when using `NetworkX <http://networkx.github.com/>`_.
"""
nodes = range(len(expr))
edges = list()
labels = dict()
stack = []
for i, node in enumerate(expr):
if stack:
edges.append((stack[-1][0], i))
stack[-1][1] -= 1
labels[i] = node.name if isinstance(node, Primitive) else node.value
stack.append([i, node.arity])
while stack and stack[-1][1] == 0:
stack.pop()
return nodes, edges, labels
if __name__ == "__main__":
import doctest
doctest.testmod()
| lgpl-3.0 |
idoerg/BOA | scripts/plot_tree.py | 1 | 4038 |
"""
Plot the phylogenetic trees for toxins and immunity genes
"""
import os
import sys
import site
import re
import numpy as np
import numpy.random
base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
base_path="%s/src"%base_path
for directory_name in os.listdir(base_path):
site.addsitedir(os.path.join(base_path, directory_name))
import fasttree
import muscle
import fasta
from fasttree import FastTree
from fasttree import UnAlignedFastTree
from acc2species import AccessionToSpecies
from accessionMap import GGAccession
"""
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cbook as cbook
from matplotlib._png import read_png
from matplotlib.offsetbox import OffsetImage
import matplotlib.gridspec as gridspec
import matplotlib.image as mpimg
"""
import numpy
import pylab
import argparse
import cPickle
from Bio import SeqIO
from Bio import Phylo
base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
for directory_name in os.listdir(base_path):
site.addsitedir(os.path.join(base_path, directory_name))
import quorum
from collections import defaultdict
from collections import Counter
from pandas import *
from itol import *
""" Transforms operon txt file into fasta file """
def getFasta(txt,fastadb,fastaindex,fastaout):
outhandle = open(fastaout,'w')
indexer = fasta.Indexer(fastadb,fastaindex)
indexer.load()
i = 0
with open(txt,'r') as handle:
for ln in handle:
ln = ln.rstrip()
toks = ln.split('|')
acc,clrname,full_evalue,hmm_st,hmm_end,env_st,env_end,description=toks
full_evalue = float(full_evalue)
hmm_st,hmm_end,env_st,env_end = map(int,[hmm_st,hmm_end,env_st,env_end])
seq = indexer.fetch(acc,env_st,env_end)
seq = fasta.format(seq)
outhandle.write(">%s:%d %s\n%s\n"%(acc,i,description,seq))
i+=1
if __name__=="__main__":
db = "/home/mortonjt/Projects/Bacfinder/db"
quorum = "/home/mortonjt/Projects/Bacfinder/workspace/quorum"
folder = "/home/mortonjt/Projects/Bacfinder/workspace/quorum/run2"
operons= "%s/operons.txt"%folder
filtered_operons = "%s/big_operons.txt"%folder
rrnaFile = "%s/rrna2.fa"%db
itolout = "itol.txt"
itol = iTOL(operons,rrnaFile,
"operon.rrna",
"operon.align",
"operon.tree")
#itol.sizeFilter(filtered_operons,k=200 )
itol.setOperonFile(operons)
#itol.getRRNAs() #### Note: This will only get the RRNAs for chromosomal bacteriocins
#itol.buildTree(MSA=MAFFT,iters=30)
#itol.functionDistribution("functions.txt")
#itol.operonDistribution("operon.txt")
itol.functionRings("operon_rings.txt")
#toxins = quorum%"/intermediate/toxin_genes.txt"
#immunity = quorum%"/intermediate/immunity_genes.txt"
#allfna = quorum%"/data/all_trans.fna"
#allfai = quorum%"/data/all_trans.fai"
#toxinfa = "toxin.fa"
#immunityfa = "immunity.fa"
#toxin_treefile = "toxin.tree"
#immunity_treefile = "immunity.tree"
#getFasta(toxins,allfna,allfai,toxinfa)
#getFasta(immunity,allfna,allfai,immunityfa)
#if not os.path.exists(toxin_treefile):
# toxinfasttree = UnAlignedFastTree(toxinfa,toxin_treefile)
# toxinfasttree.align()
# toxinfasttree.run()
#if not os.path.exists(immunity_treefile):
# immunityfasttree = UnAlignedFastTree(immunityfa,immunity_treefile)
# immunityfasttree.align()
# immunityfasttree.run()
#
#fig = plt.figure(1)
#plt.suptitle("Toxin phylogeny",fontsize=22)
#tree=Phylo.read(toxin_treefile,"newick")
#Phylo.draw(tree,show_confidence=False,do_show=False)
#fig = plt.figure(2)
#plt.suptitle("Immunity phylogeny",fontsize=22)
#tree=Phylo.read(immunity_treefile,"newick")
#Phylo.draw(tree,show_confidence=False,do_show=False)
pass
| gpl-3.0 |
ldirer/scikit-learn | examples/manifold/plot_manifold_sphere.py | 89 | 5055 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`sphx_glr_auto_examples_manifold_plot_compare_methods.py`
Note that the purpose of :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space. Unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
well that of representing a flat map of the Earth, as with
`map projection <https://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <[email protected]>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
             % (n_samples, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
ax.view_init(40, -10)
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
ssanderson/numpy | numpy/lib/polynomial.py | 1 | 38275 | """
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Compute polynomial values.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
    .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
dt = seq_of_zeros.dtype
# Let object arrays slip through, e.g. for arbitrary precision
if dt != object:
seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())):
a = a.real.copy()
return a
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Compute polynomial values.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if p.ndim != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0,:] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
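# Editor's note: an illustrative sketch, not part of the original NumPy source.
# poly and roots are (numerically) inverse operations; the coefficients below
# are made up for the demonstration.
def _example_poly_roots_roundtrip():
    p = NX.array([1.0, -3.0, 2.0])   # x**2 - 3x + 2 = (x - 1)(x - 2)
    r = roots(p)                     # approximately [2., 1.]
    return poly(r)                   # approximately [1., -3., 2.] again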
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : array_like or poly1d
        Polynomial to integrate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : list of `m` scalars or scalar, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
Weights to apply to the y-coordinates of the sample points. For
gaussian uncertainties, use 1/sigma (not 1/sigma**2).
cov : bool, optional
Return the estimate and the covariance matrix of the estimate
If full is True, then cov is not returned.
Returns
-------
p : ndarray, shape (deg + 1,) or (deg + 1, K)
Polynomial coefficients, highest power first. If `y` was 2-D, the
coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond
Present only if `full` = True. Residuals of the least-squares fit,
the effective rank of the scaled Vandermonde coefficient matrix,
its singular values, and the specified value of `rcond`. For more
details, see `linalg.lstsq`.
V : ndarray, shape (M,M) or (M,M,K)
Present only if `full` = False and `cov`=True. The covariance
matrix of the polynomial coefficient estimates. The diagonal of
this matrix are the variance estimates for each coefficient. If y
is a 2-D array, then the covariance matrix for the `k`-th data set
are in ``V[:,:,k]``
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Compute polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
...
x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if x.shape[0] != y.shape[0]:
raise TypeError("expected x and y to have same length")
# set rcond
if rcond is None:
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
lhs = vander(x, order)
rhs = y
# apply weighting
if w is not None:
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
else:
rhs *= w
# scale lhs to improve condition number and solve
scale = NX.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
c, resids, rank, s = lstsq(lhs, rhs, rcond)
c = (c.T/scale).T # broadcast scale coefficients
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning, stacklevel=2)
if full:
return c, resids, rank, s, rcond
elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
# Some literature ignores the extra -2.0 factor in the denominator, but
# it is included here because the covariance of Multivariate Student-T
# (which is implied by a Bayesian uncertainty analysis) includes it.
# Plus, it gives a slightly more conservative estimate of uncertainty.
if len(x) <= order + 2:
raise ValueError("the number of data points must exceed order + 2 "
"for Bayesian estimate the covariance matrix")
fac = resids / (len(x) - order - 2.0)
if y.ndim == 1:
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
else:
return c
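# Editor's note: an illustrative sketch, not part of the original NumPy source.
# It exercises the ``cov=True`` branch above on made-up, noise-free data, so
# the reported uncertainties are essentially zero.
def _example_polyfit_cov():
    x = NX.arange(10.0)
    y = 2.0 * x + 1.0
    coeffs, cov = polyfit(x, y, 1, cov=True)
    sigma = NX.sqrt(diag(cov))   # 1-sigma uncertainty of [slope, intercept]
    return coeffs, sigma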
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, an array of numbers, or an instance of poly1d, at
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = y * x + p[i]
return y
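# --- Illustrative sketch (editor's addition, not part of the original NumPy
# module): `polyval` uses Horner's scheme, i.e. ((p0*x + p1)*x + p2)... .
# The hypothetical helper below spells out that recurrence for comparison.
def _example_horner_equivalence():
    p = [3.0, 0.0, 1.0]          # 3*x**2 + 1
    x = 5.0
    acc = 0.0
    for coefficient in p:        # same recurrence as the loop in polyval
        acc = acc * x + coefficient
    assert acc == 76.0
    assert float(polyval(p, x)) == acc
    return acc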
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print(p1)
1 x + 2
>>> print(p2)
2
9 x + 5 x + 4
>>> print(np.polyadd(p1, p2))
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print(p1)
2
1 x + 2 x + 3
>>> print(p2)
2
9 x + 5 x + 1
>>> print(np.polymul(p1, p2))
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
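# --- Illustrative sketch (editor's addition, not part of the original NumPy
# module): the division invariant u == q*v + r, a quick sanity check for
# `polydiv`.  The helper name is hypothetical.
def _example_polydiv_invariant():
    u = [3.0, 5.0, 2.0]                        # 3x**2 + 5x + 2
    v = [2.0, 1.0]                             # 2x + 1
    q, r = polydiv(u, v)
    reconstructed = polyadd(polymul(q, v), r)  # should equal u
    assert NX.allclose(reconstructed, u)
    return q, r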
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
    # Typeset the '**<power>' markers produced by poly1d.__str__ as exponents
    # on a separate line above the polynomial body, wrapping both lines at
    # `wrap` characters.
n = 0
line1 = ''
line2 = ''
output = ' '
while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2) + len(toadd2) > wrap) or
(len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print(np.poly1d(p))
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
    Show the coefficient of the k-th power in the polynomial
    (which is equivalent to ``p.c[-(k+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print(p)
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
__hash__ = None
@property
def coeffs(self):
""" The polynomial coefficients """
return self._coeffs
@property
def variable(self):
""" The name of the polynomial variable """
return self._variable
# calculated attributes
@property
def order(self):
""" The order or degree of the polynomial """
return len(self._coeffs) - 1
@property
def roots(self):
""" The roots of the polynomial, where self(x) == 0 """
return roots(self._coeffs)
# alias attributes
r = roots
c = coef = coefficients = coeffs
o = order
def __init__(self, c_or_r, r=False, variable=None):
if isinstance(c_or_r, poly1d):
self._variable = c_or_r._variable
self._coeffs = c_or_r._coeffs
if set(c_or_r.__dict__) - set(self.__dict__):
msg = ("In the future extra properties will not be copied "
"across when constructing one poly1d from another")
warnings.warn(msg, FutureWarning, stacklevel=2)
self.__dict__.update(c_or_r.__dict__)
if variable is not None:
self._variable = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if c_or_r.ndim > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self._coeffs = c_or_r
if variable is None:
variable = 'x'
self._variable = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
def __ne__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
return not self.__eq__(other)
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self._coeffs = NX.concatenate((zr, self.coeffs))
ind = 0
self._coeffs[ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
| bsd-3-clause |
GoogleCloudPlatform/ml-on-gcp | tutorials/tensorflow/mlflow_gcp/trainer/utils.py | 1 | 9404 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to download and preprocess the Census data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import list
from builtins import zip
from builtins import map
from builtins import str
from six.moves import urllib
import tempfile
import os
import string
import random
import numpy as np
import pandas as pd
import tensorflow as tf
# Storage directory
DATA_DIR = os.path.join(tempfile.gettempdir(), 'census_data')
# Download options.
DATA_URL = ('https://storage.googleapis.com/cloud-samples-data/ml-engine/census'
'/data')
TRAINING_FILE = 'adult.data.csv'
EVAL_FILE = 'adult.test.csv'
TRAINING_URL = '%s/%s' % (DATA_URL, TRAINING_FILE)
EVAL_URL = '%s/%s' % (DATA_URL, EVAL_FILE)
# These are the features in the dataset.
# Dataset information: https://archive.ics.uci.edu/ml/datasets/census+income
_CSV_COLUMNS = [
'age', 'workclass', 'fnlwgt', 'education', 'education_num',
'marital_status', 'occupation', 'relationship', 'race', 'gender',
'capital_gain', 'capital_loss', 'hours_per_week', 'native_country',
'income_bracket'
]
# This is the label (target) we want to predict.
_LABEL_COLUMN = 'income_bracket'
# These are columns we will not use as features for training. There are many
# reasons not to use certain attributes of data for training. Perhaps their
# values are noisy or inconsistent, or perhaps they encode bias that we do not
# want our model to learn. For a deep dive into the features of this Census
# dataset and the challenges they pose, see the Introduction to ML Fairness
# Notebook: https://colab.research.google.com/github/google/eng-edu/blob
# /master/ml/cc/exercises/intro_to_fairness.ipynb
UNUSED_COLUMNS = ['fnlwgt', 'education', 'gender']
_CATEGORICAL_TYPES = {
'workclass': pd.api.types.CategoricalDtype(categories=[
'Federal-gov', 'Local-gov', 'Never-worked', 'Private', 'Self-emp-inc',
'Self-emp-not-inc', 'State-gov', 'Without-pay'
]),
'marital_status': pd.api.types.CategoricalDtype(categories=[
'Divorced', 'Married-AF-spouse', 'Married-civ-spouse',
'Married-spouse-absent', 'Never-married', 'Separated', 'Widowed'
]),
'occupation': pd.api.types.CategoricalDtype([
'Adm-clerical', 'Armed-Forces', 'Craft-repair', 'Exec-managerial',
'Farming-fishing', 'Handlers-cleaners', 'Machine-op-inspct',
'Other-service', 'Priv-house-serv', 'Prof-specialty', 'Protective-serv',
'Sales', 'Tech-support', 'Transport-moving'
]),
'relationship': pd.api.types.CategoricalDtype(categories=[
'Husband', 'Not-in-family', 'Other-relative', 'Own-child', 'Unmarried',
'Wife'
]),
'race': pd.api.types.CategoricalDtype(categories=[
'Amer-Indian-Eskimo', 'Asian-Pac-Islander', 'Black', 'Other', 'White'
]),
'native_country': pd.api.types.CategoricalDtype(categories=[
'Cambodia', 'Canada', 'China', 'Columbia', 'Cuba', 'Dominican-Republic',
'Ecuador', 'El-Salvador', 'England', 'France', 'Germany', 'Greece',
'Guatemala', 'Haiti', 'Holand-Netherlands', 'Honduras', 'Hong',
'Hungary',
'India', 'Iran', 'Ireland', 'Italy', 'Jamaica', 'Japan', 'Laos',
'Mexico',
'Nicaragua', 'Outlying-US(Guam-USVI-etc)', 'Peru', 'Philippines',
'Poland',
'Portugal', 'Puerto-Rico', 'Scotland', 'South', 'Taiwan', 'Thailand',
'Trinadad&Tobago', 'United-States', 'Vietnam', 'Yugoslavia'
]),
'income_bracket': pd.api.types.CategoricalDtype(categories=[
'<=50K', '>50K'
])
}
def _download_and_clean_file(filename, url):
"""Downloads data from url, and makes changes to match the CSV format.
    The CSVs may use spaces after the comma delimiters (non-standard) or include
rows which do not represent well-formed examples. This function strips out
some of these problems.
Args:
filename: filename to save url to
url: URL of resource to download
"""
temp_file, _ = urllib.request.urlretrieve(url)
with tf.io.gfile.GFile(temp_file, 'r') as temp_file_object:
with tf.io.gfile.GFile(filename, 'w') as file_object:
for line in temp_file_object:
line = line.strip()
line = line.replace(', ', ',')
if not line or ',' not in line:
continue
if line[-1] == '.':
line = line[:-1]
line += '\n'
file_object.write(line)
tf.io.gfile.remove(temp_file)
def download(data_dir):
"""Downloads census data if it is not already present.
Args:
data_dir: directory where we will access/save the census data
"""
tf.io.gfile.makedirs(data_dir)
training_file_path = os.path.join(data_dir, TRAINING_FILE)
if not tf.io.gfile.exists(training_file_path):
_download_and_clean_file(training_file_path, TRAINING_URL)
eval_file_path = os.path.join(data_dir, EVAL_FILE)
if not tf.io.gfile.exists(eval_file_path):
_download_and_clean_file(eval_file_path, EVAL_URL)
return training_file_path, eval_file_path
def preprocess(dataframe):
"""Converts categorical features to numeric. Removes unused columns.
Args:
dataframe: Pandas dataframe with raw data
Returns:
Dataframe with preprocessed data
"""
dataframe = dataframe.drop(columns=UNUSED_COLUMNS)
# Convert integer valued (numeric) columns to floating point
numeric_columns = dataframe.select_dtypes(['int64']).columns
dataframe[numeric_columns] = dataframe[numeric_columns].astype('float32')
# Convert categorical columns to numeric
cat_columns = dataframe.select_dtypes(['object']).columns
dataframe[cat_columns] = dataframe[cat_columns].apply(lambda x: x.astype(
_CATEGORICAL_TYPES[x.name]))
dataframe[cat_columns] = dataframe[cat_columns].apply(lambda x: x.cat.codes)
return dataframe
def standardize(dataframe):
"""Scales numerical columns using their means and standard deviation to get
z-scores: the mean of each numerical column becomes 0, and the standard
deviation becomes 1. This can help the model converge during training.
Args:
dataframe: Pandas dataframe
Returns:
Input dataframe with the numerical columns scaled to z-scores
"""
dtypes = list(zip(dataframe.dtypes.index, map(str, dataframe.dtypes)))
# Normalize numeric columns.
for column, dtype in dtypes:
if dtype == 'float32':
dataframe[column] -= dataframe[column].mean()
dataframe[column] /= dataframe[column].std()
return dataframe
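# --- Illustrative sketch (editor's addition, not part of the original trainer
# module): what `standardize` does to a toy float32 column.  The values are
# made up for demonstration only.
def _example_standardize():
    toy = pd.DataFrame({'age': np.array([20.0, 30.0, 40.0], dtype='float32')})
    scaled = standardize(toy)
    # After scaling the column has mean ~0 and standard deviation ~1.
    return scaled['age'].mean(), scaled['age'].std()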
def load_data(training_file_path, eval_file_path, *args, **kwargs):
"""Loads data into preprocessed (train_x, train_y, eval_y, eval_y)
dataframes.
Args:
training_file_path: GCS file location for training files
eval_file_path: GCS file location for eval files
Returns:
A tuple (train_x, train_y, eval_x, eval_y), where train_x and eval_x are
Pandas dataframes with features for training and train_y and eval_y are
numpy arrays with the corresponding labels.
"""
# TODO Download and clean custom files.
print('Location train file: {}, eval file {}'.format(training_file_path,
eval_file_path))
training_file_path, eval_file_path = download(DATA_DIR)
# This census data uses the value '?' for missing entries. We use
# na_values to
# find ? and set it to NaN.
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv
# .html
train_df = pd.read_csv(training_file_path, names=_CSV_COLUMNS,
na_values='?')
eval_df = pd.read_csv(eval_file_path, names=_CSV_COLUMNS, na_values='?')
train_df = preprocess(train_df)
eval_df = preprocess(eval_df)
# Split train and eval data with labels. The pop method copies and removes
# the label column from the dataframe.
train_x, train_y = train_df, train_df.pop(_LABEL_COLUMN)
eval_x, eval_y = eval_df, eval_df.pop(_LABEL_COLUMN)
# Join train_x and eval_x to normalize on overall means and standard
# deviations. Then separate them again.
all_x = pd.concat([train_x, eval_x], keys=['train', 'eval'])
all_x = standardize(all_x)
train_x, eval_x = all_x.xs('train'), all_x.xs('eval')
# Reshape label columns for use with tf.data.Dataset
train_y = np.asarray(train_y).astype('float32').reshape((-1, 1))
eval_y = np.asarray(eval_y).astype('float32').reshape((-1, 1))
return train_x, train_y, eval_x, eval_y
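# --- Illustrative usage sketch (editor's addition, not part of the original
# trainer module): how the tuple returned by `load_data` is typically
# consumed.  The GCS paths below are placeholders; the current implementation
# downloads the public census files regardless of the arguments passed.
def _example_load_data_usage():
    train_x, train_y, eval_x, eval_y = load_data(
        'gs://your-bucket/adult.data.csv', 'gs://your-bucket/adult.test.csv')
    assert train_x.shape[0] == train_y.shape[0]
    assert eval_x.shape[0] == eval_y.shape[0]
    return train_x.shape, eval_x.shape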
def get_run_id(size):
chars = string.ascii_uppercase + string.ascii_lowercase
return ''.join(random.choice(chars) for _ in range(size))
| apache-2.0 |
miltonsarria/dsp-python | filters/FIR/display_onda.py | 2 | 2621 | #Milton Orlando Sarria Paja
#USC 2017
#Fourier analysis for periodic waveform data stored in text files
#the waveform has been contaminated with sinusoidal noise
import matplotlib.pyplot as plt
import numpy as np
#import the function that performs the Fourier analysis
from fourierFunc import fourierAn
##################################################################################
##################################################################################
def text2numbers(lines):
x=[]
for line in lines:
        #from each line take each number separately and ignore the newline,
        #which is the last character
data = line[:-1].split(',')
        #convert to floats and append to the list x
x.append([float(data[0]), float(data[1])])
    #convert the list to a numpy array
x=np.array(x)
return x
##################################################################################
##################################################################################
#MAIN CODE
##################################################################################
##################################################################################
#read the text file that contains the data
file_name='sierra_ruido_sin.txt'
#file_name='sierra_ruido_ran.txt'
hf = open(file_name,'r')
lines=hf.readlines()
hf.close()
#convert the data to numeric values; the first column is time, the second the data
#they are split into separate vectors and the sampling frequency is computed
x=text2numbers(lines)
t=x[:,0]
y=x[:,1]
#fs is the inverse of the sampling period, i.e. the time separation between two consecutive samples
Ts=t[1]-t[0] #discrete increment or sampling period Ts
Fs=1.0/(Ts) #sampling frequency (inverse of the sampling period)
dF=Fs/x.size #discrete frequency increment
print(Fs)
plt.figure(1)
plt.plot(x[:,0],x[:,1])
plt.xlabel('time - secs')
plt.ylabel('amplitude - volts')
###########################################
#apply the Fourier transform to the data
absY,mYdb,pY=fourierAn(y)
#frequency vector, from -fs/2 to fs/2 (-pi<w<pi)
f=np.linspace(-Fs/2,Fs/2,absY.size)
#plot 3000 samples from the positive side and 1000 samples from the negative side; this can be adjusted as needed
#depending on how many samples are available
Nplot=3000
plt.figure(2)
plt.subplot(311)
plt.plot(f,absY)
plt.ylabel('|Y|')
plt.subplot(312)
plt.plot(f,mYdb)
plt.ylabel('In dB (20*log10(|Y|))')
plt.subplot(313)
plt.plot(f,pY)
plt.ylabel('Angle or phase (rad)')
plt.xlabel('Frequency - Hz')
plt.show()
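#--- Illustrative sketch (editor's addition, not part of the original script):
#a plausible implementation of a fourierAn-style helper, inferred from how its
#three return values are used above (shifted magnitude, dB magnitude, phase).
#The real fourierFunc module may compute these differently.
def fourierAn_sketch(y):
    Y = np.fft.fftshift(np.fft.fft(y))
    absY = np.abs(Y)
    mYdb = 20 * np.log10(absY + 1e-12)  # small offset avoids log(0)
    pY = np.angle(Y)
    return absY, mYdb, pY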
| mit |
sourabhdattawad/BuildingMachineLearningSystemsWithPython | ch03/noise_analysis.py | 24 | 2412 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import sklearn.datasets
groups = [
'comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware',
'comp.sys.mac.hardware', 'comp.windows.x', 'sci.space']
train_data = sklearn.datasets.fetch_20newsgroups(subset="train",
categories=groups)
labels = train_data.target
num_clusters = 50 # sp.unique(labels).shape[0]
import nltk.stem
english_stemmer = nltk.stem.SnowballStemmer('english')
from sklearn.feature_extraction.text import TfidfVectorizer
class StemmedTfidfVectorizer(TfidfVectorizer):
def build_analyzer(self):
analyzer = super(TfidfVectorizer, self).build_analyzer()
return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))
vectorizer = StemmedTfidfVectorizer(min_df=10, max_df=0.5,
stop_words='english', decode_error='ignore'
)
vectorized = vectorizer.fit_transform(train_data.data)
post_group = zip(train_data.data, train_data.target)
# Create a list of tuples that can be sorted by
# the length of the posts
all = [(len(post[0]), post[0], train_data.target_names[post[1]])
for post in post_group]
graphics = sorted([post for post in all if post[2] == 'comp.graphics'])
print(graphics[5])
# (245, 'From: [email protected]\nSubject: test....(sorry)\nOrganization:
# The University of Birmingham, United Kingdom\nLines: 1\nNNTP-Posting-Host: ibm3090.bham.ac.uk
# \n\n==============================================================================\n',
# 'comp.graphics')
noise_post = graphics[5][1]
analyzer = vectorizer.build_analyzer()
print(list(analyzer(noise_post)))
useful = set(analyzer(noise_post)).intersection(vectorizer.get_feature_names())
print(sorted(useful))
# ['ac', 'birmingham', 'host', 'kingdom', 'nntp', 'sorri', 'test', 'uk', 'unit', 'univers']
for term in sorted(useful):
print('IDF(%s)=%.2f' % (term,
vectorizer._tfidf.idf_[vectorizer.vocabulary_[term]]))
# IDF(ac)=3.51
# IDF(birmingham)=6.77
# IDF(host)=1.74
# IDF(kingdom)=6.68
# IDF(nntp)=1.77
# IDF(sorri)=4.14
# IDF(test)=3.83
# IDF(uk)=3.70
# IDF(unit)=4.42
# IDF(univers)=1.91
| mit |
saskartt/kandi | plotHovmollerDiagram.py | 1 | 1559 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import numpy as np
from scipy.ndimage import interpolation
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from PIL import Image
from kandiLib import *
from settings import *
'''
Plot Hovmöller diagram.
'''
#==========================================================#
parser = argparse.ArgumentParser(prog='plotHovmollerDiagram.py',
description='''Plot Hovmöller diagram.''')
parser.add_argument("-f", "--file", type=str, help="Name of the input netCDF4 file(s).")
parser.add_argument("-var", "--variable", type=str, help="Variable to be plotted")
args = parser.parse_args()
#==========================================================#
ds = openDataSet(args.file)
var, xdims, __, __ = readVariableFromMask(ds,[0., 18000.], args.variable)
t_inds, = np.where(np.logical_and(ds.variables['time'][:] >= 0., ds.variables['time'][:] <= 18000.))
var=var[:,5,:,256]
tdims=ds.variables['time'][t_inds]
tdims = interpolation.zoom(tdims,0.25)
xdims = interpolation.zoom(xdims,0.25)
var = interpolation.zoom(var,0.25)
X,Y = np.meshgrid(xdims,tdims)
cmap = mpl.cm.get_cmap("viridis")
bounds = np.linspace(0,12,11)
norm = colors.BoundaryNorm(boundaries=bounds, ncolors=256)
print(np.amax(var))
cp=plt.contourf(X,Y,var,8,vmin=0., vmax=12.,cmap=cmap)
plt.ylabel("Time [s]")
plt.xlabel("y [m]")
cbar = plt.colorbar(cp,ticks=bounds)
plt.title(u"y-directional diagram of variable {}".format(args.variable))
plt.show()
| mit |
vortex-ape/scikit-learn | sklearn/utils/tests/test_extmath.py | 4 | 26405 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
import pytest
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.fixes import np_version
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.utils.extmath import stable_cumsum
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_array_equal(mode, mode2)
assert_array_equal(score, score2)
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
@ignore_warnings # Test deprecated backport to be removed in 0.21
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def check_randomized_svd_low_rank(dtype):
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
decimal = 5 if dtype == np.float32 else 7
dtype = np.dtype(dtype)
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0).astype(dtype, copy=False)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# Convert the singular values to the specific dtype
U = U.astype(dtype, copy=False)
s = s.astype(dtype, copy=False)
V = V.astype(dtype, copy=False)
for normalizer in ['auto', 'LU', 'QR']: # 'none' would not be stable
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(
X, k, power_iteration_normalizer=normalizer, random_state=0)
# If the input dtype is float, then the output dtype is float of the
# same bit size (f32 is not upcast to f64)
# But if the input dtype is int, the output dtype is float64
if dtype.kind == 'f':
assert Ua.dtype == dtype
assert sa.dtype == dtype
assert Va.dtype == dtype
else:
assert Ua.dtype == np.float64
assert sa.dtype == np.float64
assert Va.dtype == np.float64
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the
# real rank of the matrix
assert_almost_equal(s[:k], sa, decimal=decimal)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va),
decimal=decimal)
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer,
random_state=0)
if dtype.kind == 'f':
assert Ua.dtype == dtype
assert sa.dtype == dtype
assert Va.dtype == dtype
else:
assert Ua.dtype.kind == 'f'
assert sa.dtype.kind == 'f'
assert Va.dtype.kind == 'f'
assert_almost_equal(s[:rank], sa[:rank], decimal=decimal)
@pytest.mark.parametrize('dtype',
(np.int32, np.int64, np.float32, np.float64))
def test_randomized_svd_low_rank_all_dtypes(dtype):
check_randomized_svd_low_rank(dtype)
@ignore_warnings # extmath.norm is deprecated to be removed in 0.21
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
# Check the warning with an int array and np.dot potential overflow
assert_warns_message(
UserWarning, 'Array type is integer, np.dot may '
'overflow. Data should be float type to avoid this issue',
squared_norm, X.astype(int))
@pytest.mark.parametrize('dtype',
(np.float32, np.float64))
def test_row_norms(dtype):
X = np.random.RandomState(42).randn(100, 100)
if dtype is np.float32:
precision = 4
else:
precision = 5
X = X.astype(dtype)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X), precision)
for csr_index_dtype in [np.int32, np.int64]:
Xcsr = sparse.csr_matrix(X, dtype=dtype)
# csr_matrix will use int32 indices by default,
# up-casting those to int64 when necessary
if csr_index_dtype is np.int64:
Xcsr.indptr = Xcsr.indptr.astype(csr_index_dtype)
Xcsr.indices = Xcsr.indices.astype(csr_index_dtype)
assert Xcsr.indices.dtype == csr_index_dtype
assert Xcsr.indptr.dtype == csr_index_dtype
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr),
precision)
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with structured approximate rank `rank` and an
    # important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.1,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate
# method without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer,
random_state=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.01)
# compute the singular values of X using the fast approximate
# method with iterated power method
_, sap, _ = randomized_svd(X, k,
power_iteration_normalizer=normalizer,
random_state=0)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method
# with iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5,
power_iteration_normalizer=normalizer)
# the iterated power method is still managing to get most of the
# structure at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_randomized_svd_power_iteration_normalizer():
# randomized_svd with power_iteration_normalized='none' diverges for
# large number of power iterations on this dataset
rng = np.random.RandomState(42)
X = make_low_rank_matrix(100, 500, effective_rank=50, random_state=rng)
X += 3 * rng.randint(0, 2, size=X.shape)
n_components = 50
# Check that it diverges with many (non-normalized) power iterations
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
U, s, V = randomized_svd(X, n_components, n_iter=20,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_20 = linalg.norm(A, ord='fro')
assert_greater(np.abs(error_2 - error_20), 100)
for normalizer in ['LU', 'QR', 'auto']:
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
for i in [5, 10, 50]:
U, s, V = randomized_svd(X, n_components, n_iter=i,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(V))
error = linalg.norm(A, ord='fro')
assert_greater(15, np.abs(error_2 - error))
def test_randomized_svd_sparse_warnings():
# randomized_svd throws a warning for lil and dok matrix
rng = np.random.RandomState(42)
X = make_low_rank_matrix(50, 20, effective_rank=10, random_state=rng)
n_components = 5
for cls in (sparse.lil_matrix, sparse.dok_matrix):
X = cls(X)
assert_warns_message(
sparse.SparseEfficiencyWarning,
"Calculating SVD of a {} is expensive. "
"csr_matrix is more efficient.".format(cls.__name__),
randomized_svd, X, n_components, n_iter=1,
power_iteration_normalizer='none')
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_randomized_svd_sign_flip_with_transpose():
# Check if the randomized_svd sign flipping is always done based on u
# irrespective of transpose.
# See https://github.com/scikit-learn/scikit-learn/issues/5608
# for more details.
def max_loading_is_positive(u, v):
"""
returns bool tuple indicating if the values maximising np.abs
are positive across all rows for u and across all columns for v.
"""
u_based = (np.abs(u).max(axis=0) == u.max(axis=0)).all()
v_based = (np.abs(v).max(axis=1) == v.max(axis=1)).all()
return u_based, v_based
mat = np.arange(10 * 8).reshape(10, -1)
# Without transpose
u_flipped, _, v_flipped = randomized_svd(mat, 3, flip_sign=True)
u_based, v_based = max_loading_is_positive(u_flipped, v_flipped)
assert_true(u_based)
assert_false(v_based)
# With transpose
u_flipped_with_transpose, _, v_flipped_with_transpose = randomized_svd(
mat, 3, flip_sign=True, transpose=True)
u_based, v_based = max_loading_is_positive(
u_flipped_with_transpose, v_flipped_with_transpose)
assert_true(u_based)
assert_false(v_based)
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
def naive_log_logistic(x):
return np.log(1 / (1 + np.exp(-x)))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = np.full(X1.shape[1], X1.shape[0], dtype=np.int32)
final_means, final_variances, final_count = \
_incremental_mean_and_var(X2, old_means, old_variances,
old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
def test_incremental_mean_and_variance_ignore_nan():
old_means = np.array([535., 535., 535., 535.])
old_variances = np.array([4225., 4225., 4225., 4225.])
old_sample_count = np.array([2, 2, 2, 2], dtype=np.int32)
X = np.array([[170, 170, 170, 170],
[430, 430, 430, 430],
[300, 300, 300, 300]])
X_nan = np.array([[170, np.nan, 170, 170],
[np.nan, 170, 430, 430],
[430, 430, np.nan, 300],
[300, 300, 300, np.nan]])
X_means, X_variances, X_count = _incremental_mean_and_var(
X, old_means, old_variances, old_sample_count)
X_nan_means, X_nan_variances, X_nan_count = _incremental_mean_and_var(
X_nan, old_means, old_variances, old_sample_count)
assert_allclose(X_nan_means, X_means)
assert_allclose(X_nan_variances, X_variances)
assert_allclose(X_nan_count, X_count)
@skip_if_32bit
def test_incremental_variance_numerical_stability():
# Test Youngs and Cramer incremental variance formulas.
def np_var(A):
return A.var(axis=0)
# Naive one pass variance computation - not numerically stable
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
def one_pass_var(X):
n = X.shape[0]
exp_x2 = (X ** 2).sum(axis=0) / n
expx_2 = (X.sum(axis=0) / n) ** 2
return exp_x2 - expx_2
# Two-pass algorithm, stable.
# We use it as a benchmark. It is not an online algorithm
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
def two_pass_var(X):
mean = X.mean(axis=0)
Y = X.copy()
return np.mean((Y - mean)**2, axis=0)
# Naive online implementation
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
# This works only for chunks for size 1
def naive_mean_variance_update(x, last_mean, last_variance,
last_sample_count):
updated_sample_count = (last_sample_count + 1)
samples_ratio = last_sample_count / float(updated_sample_count)
updated_mean = x / updated_sample_count + last_mean * samples_ratio
updated_variance = last_variance * samples_ratio + \
(x - last_mean) * (x - updated_mean) / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
# We want to show a case when one_pass_var has error > 1e-3 while
# _batch_mean_variance_update has less.
tol = 200
n_features = 2
n_samples = 10000
x1 = np.array(1e8, dtype=np.float64)
x2 = np.log(1e-5, dtype=np.float64)
A0 = np.full((n_samples // 2, n_features), x1, dtype=np.float64)
A1 = np.full((n_samples // 2, n_features), x2, dtype=np.float64)
A = np.vstack((A0, A1))
# Older versions of numpy have different precision
# In some old version, np.var is not stable
if np.abs(np_var(A) - two_pass_var(A)).max() < 1e-6:
stable_var = np_var
else:
stable_var = two_pass_var
# Naive one pass var: >tol (=1063)
assert_greater(np.abs(stable_var(A) - one_pass_var(A)).max(), tol)
# Starting point for online algorithms: after A0
# Naive implementation: >tol (436)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
naive_mean_variance_update(A1[i, :], mean, var, n)
assert_equal(n, A.shape[0])
# the mean is also slightly unstable
assert_greater(np.abs(A.mean(axis=0) - mean).max(), 1e-6)
assert_greater(np.abs(stable_var(A) - var).max(), tol)
# Robust implementation: <tol (177)
mean, var = A0[0, :], np.zeros(n_features)
n = np.full(n_features, n_samples // 2, dtype=np.int32)
for i in range(A1.shape[0]):
mean, var, n = \
_incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])),
mean, var, n)
assert_array_equal(n, A.shape[0])
assert_array_almost_equal(A.mean(axis=0), mean)
assert_greater(tol, np.abs(stable_var(A) - var).max())
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = np.full(batch.shape[1], batch.shape[0],
dtype=np.int32)
else:
result = _incremental_mean_and_var(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_array_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
def test_stable_cumsum():
if np_version < (1, 9):
raise SkipTest("Sum is as unstable as cumsum for numpy < 1.9")
assert_array_equal(stable_cumsum([1, 2, 3]), np.cumsum([1, 2, 3]))
r = np.random.RandomState(0).rand(100000)
assert_warns(RuntimeWarning, stable_cumsum, r, rtol=0, atol=0)
# test axis parameter
A = np.random.RandomState(36).randint(1000, size=(5, 5, 5))
assert_array_equal(stable_cumsum(A, axis=0), np.cumsum(A, axis=0))
assert_array_equal(stable_cumsum(A, axis=1), np.cumsum(A, axis=1))
assert_array_equal(stable_cumsum(A, axis=2), np.cumsum(A, axis=2))
| bsd-3-clause |
chenjun0210/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/dataframe.py | 85 | 4704 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataFrame is a container for ingesting and preprocessing data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from .series import Series
from .transform import Transform
class DataFrame(object):
"""A DataFrame is a container for ingesting and preprocessing data."""
def __init__(self):
self._columns = {}
def columns(self):
"""Set of the column names."""
return frozenset(self._columns.keys())
def __len__(self):
"""The number of columns in the DataFrame."""
return len(self._columns)
def assign(self, **kwargs):
"""Adds columns to DataFrame.
Args:
      **kwargs: assignments of the form key=value where key is a string
        and value is an `inflow.Series`, a zero-input `Transform`, or None
        (which removes an existing column).
    Raises:
      TypeError: keys are not strings.
      TypeError: values are not `inflow.Series`, zero-input `Transform`,
        or None.
TODO(jamieas): pandas assign method returns a new DataFrame. Consider
switching to this behavior, changing the name or adding in_place as an
argument.
"""
for k, v in kwargs.items():
if not isinstance(k, str):
raise TypeError("The only supported type for keys is string; got %s" %
type(k))
if v is None:
del self._columns[k]
elif isinstance(v, Series):
self._columns[k] = v
elif isinstance(v, Transform) and v.input_valency() == 0:
self._columns[k] = v()
else:
raise TypeError(
"Column in assignment must be an inflow.Series, inflow.Transform,"
" or None; got type '%s'." % type(v).__name__)
def select_columns(self, keys):
"""Returns a new DataFrame with a subset of columns.
Args:
keys: A list of strings. Each should be the name of a column in the
DataFrame.
Returns:
A new DataFrame containing only the specified columns.
"""
result = type(self)()
for key in keys:
result[key] = self._columns[key]
return result
def exclude_columns(self, exclude_keys):
"""Returns a new DataFrame with all columns not excluded via exclude_keys.
Args:
exclude_keys: A list of strings. Each should be the name of a column in
the DataFrame. These columns will be excluded from the result.
Returns:
A new DataFrame containing all columns except those specified.
"""
result = type(self)()
for key, value in self._columns.items():
if key not in exclude_keys:
result[key] = value
return result
def __getitem__(self, key):
"""Indexing functionality for DataFrames.
Args:
key: a string or an iterable of strings.
Returns:
A Series or list of Series corresponding to the given keys.
"""
if isinstance(key, str):
return self._columns[key]
elif isinstance(key, collections.Iterable):
for i in key:
if not isinstance(i, str):
raise TypeError("Expected a String; entry %s has type %s." %
(i, type(i).__name__))
return [self.__getitem__(i) for i in key]
raise TypeError(
"Invalid index: %s of type %s. Only strings or lists of strings are "
"supported." % (key, type(key)))
def __setitem__(self, key, value):
if isinstance(key, str):
key = [key]
if isinstance(value, Series):
value = [value]
self.assign(**dict(zip(key, value)))
def __delitem__(self, key):
if isinstance(key, str):
key = [key]
value = [None for _ in key]
self.assign(**dict(zip(key, value)))
def build(self, **kwargs):
# We do not allow passing a cache here, because that would encourage
# working around the rule that DataFrames cannot be expected to be
# synced with each other (e.g., they shuffle independently).
cache = {}
tensors = {name: c.build(cache, **kwargs)
for name, c in self._columns.items()}
return tensors
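# --- Illustrative sketch (editor's addition, not part of the original module):
# minimal use of the DataFrame container defined above.  `series_a` and
# `series_b` are placeholders for any `Series` objects.
def _example_dataframe_usage(series_a, series_b):
  df = DataFrame()
  df.assign(a=series_a, b=series_b)   # add two columns in place
  subset = df.select_columns(['a'])   # new DataFrame with only column 'a'
  del df['b']                         # __delitem__ drops a column
  return subset.columns(), df.columns()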
| apache-2.0 |
multidis/bitQuant | bitquant/data/clss.py | 2 | 3127 | import conv
import tools
from ..api.clss import api
from ..sql.clss import sql
from pandas import DataFrame
import time as tm
class data(object):
def __init__(self):
self.a = api()
self.s = sql()
self.jobs = []
self.trd = DataFrame()
self.prc = DataFrame()
def add_trades(self, exchange, symbol, limit='', since='',
auto_since='no', ping_limit=1.0):
job = {'exchange':exchange,'symbol':symbol}
self.a.add_job(exchange, symbol, 'trades', limit=limit, since=since,
auto_since=auto_since, ping_limit=ping_limit)
self.jobs.append(job)
def get_trades(self, exchange='', symbol='', start=''):
trd = self.s.select('trades',exchange=exchange,
symbol=symbol,start=start)
self.trd = self.trd.append(trd)
self.trd = self.trd.drop_duplicates(['tid','exchange'])
def run_trades(self, exchange, symbol):
self.trd = self.trd.append(self.a.run(exchange,symbol,'trades'))
self.trd = self.trd.drop_duplicates(['tid','exchange'])
def run_loop(self, time, to_sql=60, log='no'):
dump = tm.time() + to_sql
end = tm.time() + time
while tm.time() < end:
for job in self.jobs:
self.run_trades(job['exchange'], job['symbol'])
if tm.time() > dump:
dump = tm.time() + to_sql
self.to_sql(log)
def get_price(self, exchange='', symbol='',
freq='', start=''):
prc = self.s.select('price',exchange=exchange,symbol=symbol,
freq=freq, start=start)
self.prc = self.prc.append(prc)
self.prc = self.prc.drop_duplicates(['timestamp','exchange',
'symbol','freq'])
return prc
def run_price(self, exchange, symbol, freq, label='left',
from_sql='no', start=''):
if from_sql == 'yes':
self.get_trades(exchange, symbol, start=start)
# get_trades already applied exchange, symbol checks
trd = self.trd
else:
            trd = self.trd
            if exchange != '':
                trd = trd[trd.exchange == exchange]
            if symbol != '':
                # filter the already exchange-filtered frame, not self.trd,
                # so both conditions apply together
                trd = trd[trd.symbol == symbol]
trd = tools.date_index(trd)
if len(trd.index) > 0:
prc = conv.olhcv(trd, freq, label=label)
self.prc = self.prc.append(prc)
self.prc = self.prc.drop_duplicates(['timestamp','exchange',
'symbol','freq'])
def to_sql(self, log='no'):
if 'sent' in self.trd:
            trd = self.trd[self.trd['sent'] != 'yes']
else:
trd = self.trd
if 'sent' in self.prc:
            prc = self.prc[self.prc['sent'] != 'yes']
else:
prc = self.prc
self.s.insert('trades', trd)
self.s.insert('price', prc)
if log == 'yes':
            print(trd)
            print(prc)
self.trd['sent'] = 'yes'
self.prc['sent'] = 'yes'
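# Hedged usage sketch (added here for illustration): the exchange name, symbol,
# frequency and timings below are assumptions, and running it needs a reachable
# exchange API plus the SQL backend, so the function is defined but never
# called on import.
def _example_pipeline():
    d = data()
    d.add_trades('bitfinex', 'btcusd', limit=100, auto_since='yes')
    d.run_loop(60, to_sql=30, log='yes')        # poll trades for one minute
    d.run_price('bitfinex', 'btcusd', '5min')   # resample trades into price bars
    d.to_sql(log='yes')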
| mit |
dsm054/pandas | pandas/core/reshape/merge.py | 2 | 61308 | """
SQL-style merge routines
"""
import copy
import string
import warnings
import numpy as np
from pandas._libs import hashtable as libhashtable, join as libjoin, lib
import pandas.compat as compat
from pandas.compat import filter, lzip, map, range, zip
from pandas.errors import MergeError
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.common import (
ensure_float64, ensure_int64, ensure_object, is_array_like, is_bool,
is_bool_dtype, is_categorical_dtype, is_datetime64_dtype,
is_datetime64tz_dtype, is_datetimelike, is_dtype_equal, is_float_dtype,
is_int64_dtype, is_int_or_datetime_dtype, is_integer, is_integer_dtype,
is_list_like, is_number, is_numeric_dtype, needs_i8_conversion)
from pandas.core.dtypes.missing import isnull, na_value_for_dtype
from pandas import Categorical, DataFrame, Index, MultiIndex, Series, Timedelta
import pandas.core.algorithms as algos
from pandas.core.arrays.categorical import _recode_for_categories
import pandas.core.common as com
from pandas.core.frame import _merge_doc
from pandas.core.internals import (
concatenate_block_managers, items_overlap_with_suffix)
import pandas.core.sorting as sorting
from pandas.core.sorting import is_int64_overflow_possible
@Substitution('\nleft : DataFrame')
@Appender(_merge_doc, indents=0)
def merge(left, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
op = _MergeOperation(left, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, sort=sort, suffixes=suffixes,
copy=copy, indicator=indicator,
validate=validate)
return op.get_result()
if __debug__:
merge.__doc__ = _merge_doc % '\nleft : DataFrame'
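# Hedged example of the wrapper above (illustrative only, not used by pandas):
# a left merge that adds the indicator column and validates a many-to-one
# relationship; the frames and column names are made up for the example.
def _merge_example():
    left = DataFrame({"key": ["a", "a", "b"], "lval": [1, 2, 3]})
    right = DataFrame({"key": ["a", "b"], "rval": [10, 20]})
    return merge(left, right, on="key", how="left",
                 indicator=True, validate="m:1")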
def _groupby_and_merge(by, on, left, right, _merge_pieces,
check_duplicates=True):
"""
groupby & merge; we are always performing a left-by type operation
Parameters
----------
by: field to group
on: duplicates field
left: left frame
right: right frame
_merge_pieces: function for merging
check_duplicates: boolean, default True
should we check & clean duplicates
"""
pieces = []
if not isinstance(by, (list, tuple)):
by = [by]
lby = left.groupby(by, sort=False)
# if we can groupby the rhs
# then we can get vastly better perf
try:
# we will check & remove duplicates if indicated
if check_duplicates:
if on is None:
on = []
elif not isinstance(on, (list, tuple)):
on = [on]
if right.duplicated(by + on).any():
right = right.drop_duplicates(by + on, keep='last')
rby = right.groupby(by, sort=False)
except KeyError:
rby = None
for key, lhs in lby:
if rby is None:
rhs = right
else:
try:
rhs = right.take(rby.indices[key])
except KeyError:
# key doesn't exist in left
lcols = lhs.columns.tolist()
cols = lcols + [r for r in right.columns
if r not in set(lcols)]
merged = lhs.reindex(columns=cols)
merged.index = range(len(merged))
pieces.append(merged)
continue
merged = _merge_pieces(lhs, rhs)
# make sure join keys are in the merged
# TODO, should _merge_pieces do this?
for k in by:
try:
if k in merged:
merged[k] = key
except KeyError:
pass
pieces.append(merged)
# preserve the original order
# if we have a missing piece this can be reset
from pandas.core.reshape.concat import concat
result = concat(pieces, ignore_index=True)
result = result.reindex(columns=pieces[0].columns, copy=False)
return result, lby
def merge_ordered(left, right, on=None,
left_on=None, right_on=None,
left_by=None, right_by=None,
fill_method=None, suffixes=('_x', '_y'),
how='outer'):
"""Perform merge with optional filling/interpolation designed for ordered
data like time series data. Optionally perform group-wise merge (see
examples)
Parameters
----------
left : DataFrame
right : DataFrame
on : label or list
Field names to join on. Must be found in both DataFrames.
left_on : label or list, or array-like
Field names to join on in left DataFrame. Can be a vector or list of
vectors of the length of the DataFrame to use a particular vector as
the join key instead of columns
right_on : label or list, or array-like
Field names to join on in right DataFrame or vector/list of vectors per
left_on docs
left_by : column name or list of column names
Group left DataFrame by group columns and merge piece by piece with
right DataFrame
right_by : column name or list of column names
Group right DataFrame by group columns and merge piece by piece with
left DataFrame
fill_method : {'ffill', None}, default None
Interpolation method for data
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
how : {'left', 'right', 'outer', 'inner'}, default 'outer'
* left: use only keys from left frame (SQL: left outer join)
* right: use only keys from right frame (SQL: right outer join)
* outer: use union of keys from both frames (SQL: full outer join)
* inner: use intersection of keys from both frames (SQL: inner join)
.. versionadded:: 0.19.0
Examples
--------
>>> A >>> B
key lvalue group key rvalue
0 a 1 a 0 b 1
1 c 2 a 1 c 2
2 e 3 a 2 d 3
3 a 1 b
4 c 2 b
5 e 3 b
>>> merge_ordered(A, B, fill_method='ffill', left_by='group')
group key lvalue rvalue
0 a a 1 NaN
1 a b 1 1.0
2 a c 2 2.0
3 a d 2 3.0
4 a e 3 3.0
5 b a 1 NaN
6 b b 1 1.0
7 b c 2 2.0
8 b d 2 3.0
9 b e 3 3.0
Returns
-------
merged : DataFrame
    The output type will be the same as 'left', if it is a subclass
    of DataFrame.
See also
--------
merge
merge_asof
"""
def _merger(x, y):
# perform the ordered merge operation
op = _OrderedMerge(x, y, on=on, left_on=left_on, right_on=right_on,
suffixes=suffixes, fill_method=fill_method,
how=how)
return op.get_result()
if left_by is not None and right_by is not None:
raise ValueError('Can only group either left or right frames')
elif left_by is not None:
result, _ = _groupby_and_merge(left_by, on, left, right,
lambda x, y: _merger(x, y),
check_duplicates=False)
elif right_by is not None:
result, _ = _groupby_and_merge(right_by, on, right, left,
lambda x, y: _merger(y, x),
check_duplicates=False)
else:
result = _merger(left, right)
return result
def merge_asof(left, right, on=None,
left_on=None, right_on=None,
left_index=False, right_index=False,
by=None, left_by=None, right_by=None,
suffixes=('_x', '_y'),
tolerance=None,
allow_exact_matches=True,
direction='backward'):
"""Perform an asof merge. This is similar to a left-join except that we
match on nearest key rather than equal keys.
Both DataFrames must be sorted by the key.
For each row in the left DataFrame:
- A "backward" search selects the last row in the right DataFrame whose
'on' key is less than or equal to the left's key.
- A "forward" search selects the first row in the right DataFrame whose
'on' key is greater than or equal to the left's key.
- A "nearest" search selects the row in the right DataFrame whose 'on'
key is closest in absolute distance to the left's key.
The default is "backward" and is compatible in versions below 0.20.0.
The direction parameter was added in version 0.20.0 and introduces
"forward" and "nearest".
Optionally match on equivalent keys with 'by' before searching with 'on'.
.. versionadded:: 0.19.0
Parameters
----------
left : DataFrame
right : DataFrame
on : label
Field name to join on. Must be found in both DataFrames.
The data MUST be ordered. Furthermore this must be a numeric column,
such as datetimelike, integer, or float. On or left_on/right_on
must be given.
left_on : label
Field name to join on in left DataFrame.
right_on : label
Field name to join on in right DataFrame.
left_index : boolean
Use the index of the left DataFrame as the join key.
.. versionadded:: 0.19.2
right_index : boolean
Use the index of the right DataFrame as the join key.
.. versionadded:: 0.19.2
by : column name or list of column names
Match on these columns before performing merge operation.
left_by : column name
Field names to match on in the left DataFrame.
.. versionadded:: 0.19.2
right_by : column name
Field names to match on in the right DataFrame.
.. versionadded:: 0.19.2
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively.
tolerance : integer or Timedelta, optional, default None
Select asof tolerance within this range; must be compatible
with the merge index.
allow_exact_matches : boolean, default True
- If True, allow matching with the same 'on' value
(i.e. less-than-or-equal-to / greater-than-or-equal-to)
- If False, don't match the same 'on' value
(i.e., strictly less-than / strictly greater-than)
direction : 'backward' (default), 'forward', or 'nearest'
Whether to search for prior, subsequent, or closest matches.
.. versionadded:: 0.20.0
Returns
-------
merged : DataFrame
Examples
--------
>>> left = pd.DataFrame({'a': [1, 5, 10], 'left_val': ['a', 'b', 'c']})
>>> left
a left_val
0 1 a
1 5 b
2 10 c
>>> right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
... 'right_val': [1, 2, 3, 6, 7]})
>>> right
a right_val
0 1 1
1 2 2
2 3 3
3 6 6
4 7 7
>>> pd.merge_asof(left, right, on='a')
a left_val right_val
0 1 a 1
1 5 b 3
2 10 c 7
>>> pd.merge_asof(left, right, on='a', allow_exact_matches=False)
a left_val right_val
0 1 a NaN
1 5 b 3.0
2 10 c 7.0
>>> pd.merge_asof(left, right, on='a', direction='forward')
a left_val right_val
0 1 a 1.0
1 5 b 6.0
2 10 c NaN
>>> pd.merge_asof(left, right, on='a', direction='nearest')
a left_val right_val
0 1 a 1
1 5 b 6
2 10 c 7
We can use indexed DataFrames as well.
>>> left = pd.DataFrame({'left_val': ['a', 'b', 'c']}, index=[1, 5, 10])
>>> left
left_val
1 a
5 b
10 c
>>> right = pd.DataFrame({'right_val': [1, 2, 3, 6, 7]},
... index=[1, 2, 3, 6, 7])
>>> right
right_val
1 1
2 2
3 3
6 6
7 7
>>> pd.merge_asof(left, right, left_index=True, right_index=True)
left_val right_val
1 a 1
5 b 3
10 c 7
Here is a real-world times-series example
>>> quotes
time ticker bid ask
0 2016-05-25 13:30:00.023 GOOG 720.50 720.93
1 2016-05-25 13:30:00.023 MSFT 51.95 51.96
2 2016-05-25 13:30:00.030 MSFT 51.97 51.98
3 2016-05-25 13:30:00.041 MSFT 51.99 52.00
4 2016-05-25 13:30:00.048 GOOG 720.50 720.93
5 2016-05-25 13:30:00.049 AAPL 97.99 98.01
6 2016-05-25 13:30:00.072 GOOG 720.50 720.88
7 2016-05-25 13:30:00.075 MSFT 52.01 52.03
>>> trades
time ticker price quantity
0 2016-05-25 13:30:00.023 MSFT 51.95 75
1 2016-05-25 13:30:00.038 MSFT 51.95 155
2 2016-05-25 13:30:00.048 GOOG 720.77 100
3 2016-05-25 13:30:00.048 GOOG 720.92 100
4 2016-05-25 13:30:00.048 AAPL 98.00 100
By default we are taking the asof of the quotes
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker')
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
We only asof within 2ms between the quote time and the trade time
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker',
... tolerance=pd.Timedelta('2ms'))
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
We only asof within 10ms between the quote time and the trade time
and we exclude exact matches on time. However *prior* data will
propagate forward
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker',
... tolerance=pd.Timedelta('10ms'),
... allow_exact_matches=False)
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN
3 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
See also
--------
merge
merge_ordered
"""
op = _AsOfMerge(left, right,
on=on, left_on=left_on, right_on=right_on,
left_index=left_index, right_index=right_index,
by=by, left_by=left_by, right_by=right_by,
suffixes=suffixes,
how='asof', tolerance=tolerance,
allow_exact_matches=allow_exact_matches,
direction=direction)
return op.get_result()
# TODO: transformations??
# TODO: only copy DataFrames when modification necessary
class _MergeOperation(object):
"""
Perform a database (SQL) merge operation between two DataFrame objects
using either columns as keys or their row indexes
"""
_merge_type = 'merge'
def __init__(self, left, right, how='inner', on=None,
left_on=None, right_on=None, axis=1,
left_index=False, right_index=False, sort=True,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
left = validate_operand(left)
right = validate_operand(right)
self.left = self.orig_left = left
self.right = self.orig_right = right
self.how = how
self.axis = axis
self.on = com.maybe_make_list(on)
self.left_on = com.maybe_make_list(left_on)
self.right_on = com.maybe_make_list(right_on)
self.copy = copy
self.suffixes = suffixes
self.sort = sort
self.left_index = left_index
self.right_index = right_index
self.indicator = indicator
if isinstance(self.indicator, compat.string_types):
self.indicator_name = self.indicator
elif isinstance(self.indicator, bool):
self.indicator_name = '_merge' if self.indicator else None
else:
raise ValueError(
'indicator option can only accept boolean or string arguments')
if not is_bool(left_index):
raise ValueError(
'left_index parameter must be of type bool, not '
'{left_index}'.format(left_index=type(left_index)))
if not is_bool(right_index):
raise ValueError(
'right_index parameter must be of type bool, not '
'{right_index}'.format(right_index=type(right_index)))
# warn user when merging between different levels
if left.columns.nlevels != right.columns.nlevels:
msg = ('merging between different levels can give an unintended '
'result ({left} levels on the left, {right} on the right)'
).format(left=left.columns.nlevels,
right=right.columns.nlevels)
warnings.warn(msg, UserWarning)
self._validate_specification()
# note this function has side effects
(self.left_join_keys,
self.right_join_keys,
self.join_names) = self._get_merge_keys()
# validate the merge keys dtypes. We may need to coerce
# to avoid incompat dtypes
self._maybe_coerce_merge_keys()
# If argument passed to validate,
# check if columns specified as unique
# are in fact unique.
if validate is not None:
self._validate(validate)
def get_result(self):
if self.indicator:
self.left, self.right = self._indicator_pre_merge(
self.left, self.right)
join_index, left_indexer, right_indexer = self._get_join_info()
ldata, rdata = self.left._data, self.right._data
lsuf, rsuf = self.suffixes
llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
rdata.items, rsuf)
lindexers = {1: left_indexer} if left_indexer is not None else {}
rindexers = {1: right_indexer} if right_indexer is not None else {}
result_data = concatenate_block_managers(
[(ldata, lindexers), (rdata, rindexers)],
axes=[llabels.append(rlabels), join_index],
concat_axis=0, copy=self.copy)
typ = self.left._constructor
result = typ(result_data).__finalize__(self, method=self._merge_type)
if self.indicator:
result = self._indicator_post_merge(result)
self._maybe_add_join_keys(result, left_indexer, right_indexer)
self._maybe_restore_index_levels(result)
return result
def _indicator_pre_merge(self, left, right):
columns = left.columns.union(right.columns)
for i in ['_left_indicator', '_right_indicator']:
if i in columns:
raise ValueError("Cannot use `indicator=True` option when "
"data contains a column named {name}"
.format(name=i))
if self.indicator_name in columns:
raise ValueError(
"Cannot use name of an existing column for indicator column")
left = left.copy()
right = right.copy()
left['_left_indicator'] = 1
left['_left_indicator'] = left['_left_indicator'].astype('int8')
right['_right_indicator'] = 2
right['_right_indicator'] = right['_right_indicator'].astype('int8')
return left, right
def _indicator_post_merge(self, result):
result['_left_indicator'] = result['_left_indicator'].fillna(0)
result['_right_indicator'] = result['_right_indicator'].fillna(0)
result[self.indicator_name] = Categorical((result['_left_indicator'] +
result['_right_indicator']),
categories=[1, 2, 3])
result[self.indicator_name] = (
result[self.indicator_name]
.cat.rename_categories(['left_only', 'right_only', 'both']))
result = result.drop(labels=['_left_indicator', '_right_indicator'],
axis=1)
return result
def _maybe_restore_index_levels(self, result):
"""
Restore index levels specified as `on` parameters
Here we check for cases where `self.left_on` and `self.right_on` pairs
each reference an index level in their respective DataFrames. The
joined columns corresponding to these pairs are then restored to the
index of `result`.
**Note:** This method has side effects. It modifies `result` in-place
Parameters
----------
result: DataFrame
merge result
Returns
-------
None
"""
names_to_restore = []
for name, left_key, right_key in zip(self.join_names,
self.left_on,
self.right_on):
if (self.orig_left._is_level_reference(left_key) and
self.orig_right._is_level_reference(right_key) and
name not in result.index.names):
names_to_restore.append(name)
if names_to_restore:
result.set_index(names_to_restore, inplace=True)
def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
left_has_missing = None
right_has_missing = None
keys = zip(self.join_names, self.left_on, self.right_on)
for i, (name, lname, rname) in enumerate(keys):
if not _should_fill(lname, rname):
continue
take_left, take_right = None, None
if name in result:
if left_indexer is not None and right_indexer is not None:
if name in self.left:
if left_has_missing is None:
left_has_missing = (left_indexer == -1).any()
if left_has_missing:
take_right = self.right_join_keys[i]
if not is_dtype_equal(result[name].dtype,
self.left[name].dtype):
take_left = self.left[name]._values
elif name in self.right:
if right_has_missing is None:
right_has_missing = (right_indexer == -1).any()
if right_has_missing:
take_left = self.left_join_keys[i]
if not is_dtype_equal(result[name].dtype,
self.right[name].dtype):
take_right = self.right[name]._values
elif left_indexer is not None \
and is_array_like(self.left_join_keys[i]):
take_left = self.left_join_keys[i]
take_right = self.right_join_keys[i]
if take_left is not None or take_right is not None:
if take_left is None:
lvals = result[name]._values
else:
lfill = na_value_for_dtype(take_left.dtype)
lvals = algos.take_1d(take_left, left_indexer,
fill_value=lfill)
if take_right is None:
rvals = result[name]._values
else:
rfill = na_value_for_dtype(take_right.dtype)
rvals = algos.take_1d(take_right, right_indexer,
fill_value=rfill)
# if we have an all missing left_indexer
# make sure to just use the right values
mask = left_indexer == -1
if mask.all():
key_col = rvals
else:
key_col = Index(lvals).where(~mask, rvals)
if result._is_label_reference(name):
result[name] = key_col
elif result._is_level_reference(name):
if isinstance(result.index, MultiIndex):
idx_list = [result.index.get_level_values(level_name)
if level_name != name else key_col
for level_name in result.index.names]
result.set_index(idx_list, inplace=True)
else:
result.index = Index(key_col, name=name)
else:
result.insert(i, name or 'key_{i}'.format(i=i), key_col)
def _get_join_indexers(self):
""" return the join indexers """
return _get_join_indexers(self.left_join_keys,
self.right_join_keys,
sort=self.sort,
how=self.how)
def _get_join_info(self):
left_ax = self.left._data.axes[self.axis]
right_ax = self.right._data.axes[self.axis]
if self.left_index and self.right_index and self.how != 'asof':
join_index, left_indexer, right_indexer = \
left_ax.join(right_ax, how=self.how, return_indexers=True,
sort=self.sort)
elif self.right_index and self.how == 'left':
join_index, left_indexer, right_indexer = \
_left_join_on_index(left_ax, right_ax, self.left_join_keys,
sort=self.sort)
elif self.left_index and self.how == 'right':
join_index, right_indexer, left_indexer = \
_left_join_on_index(right_ax, left_ax, self.right_join_keys,
sort=self.sort)
else:
(left_indexer,
right_indexer) = self._get_join_indexers()
if self.right_index:
if len(self.left) > 0:
join_index = self.left.index.take(left_indexer)
else:
join_index = self.right.index.take(right_indexer)
left_indexer = np.array([-1] * len(join_index))
elif self.left_index:
if len(self.right) > 0:
join_index = self.right.index.take(right_indexer)
else:
join_index = self.left.index.take(left_indexer)
right_indexer = np.array([-1] * len(join_index))
else:
join_index = Index(np.arange(len(left_indexer)))
if len(join_index) == 0:
join_index = join_index.astype(object)
return join_index, left_indexer, right_indexer
def _get_merge_keys(self):
"""
Note: has side effects (copy/delete key columns)
Parameters
----------
left
right
on
Returns
-------
left_keys, right_keys
"""
left_keys = []
right_keys = []
join_names = []
right_drop = []
left_drop = []
left, right = self.left, self.right
is_lkey = lambda x: is_array_like(x) and len(x) == len(left)
is_rkey = lambda x: is_array_like(x) and len(x) == len(right)
# Note that pd.merge_asof() has separate 'on' and 'by' parameters. A
# user could, for example, request 'left_index' and 'left_by'. In a
# regular pd.merge(), users cannot specify both 'left_index' and
# 'left_on'. (Instead, users have a MultiIndex). That means the
# self.left_on in this function is always empty in a pd.merge(), but
# a pd.merge_asof(left_index=True, left_by=...) will result in a
# self.left_on array with a None in the middle of it. This requires
# a work-around as designated in the code below.
# See _validate_specification() for where this happens.
# ugh, spaghetti re #733
if _any(self.left_on) and _any(self.right_on):
for lk, rk in zip(self.left_on, self.right_on):
if is_lkey(lk):
left_keys.append(lk)
if is_rkey(rk):
right_keys.append(rk)
join_names.append(None) # what to do?
else:
if rk is not None:
right_keys.append(
right._get_label_or_level_values(rk))
join_names.append(rk)
else:
# work-around for merge_asof(right_index=True)
right_keys.append(right.index)
join_names.append(right.index.name)
else:
if not is_rkey(rk):
if rk is not None:
right_keys.append(
right._get_label_or_level_values(rk))
else:
# work-around for merge_asof(right_index=True)
right_keys.append(right.index)
if lk is not None and lk == rk:
# avoid key upcast in corner case (length-0)
if len(left) > 0:
right_drop.append(rk)
else:
left_drop.append(lk)
else:
right_keys.append(rk)
if lk is not None:
left_keys.append(left._get_label_or_level_values(lk))
join_names.append(lk)
else:
# work-around for merge_asof(left_index=True)
left_keys.append(left.index)
join_names.append(left.index.name)
elif _any(self.left_on):
for k in self.left_on:
if is_lkey(k):
left_keys.append(k)
join_names.append(None)
else:
left_keys.append(left._get_label_or_level_values(k))
join_names.append(k)
if isinstance(self.right.index, MultiIndex):
right_keys = [lev._values.take(lab)
for lev, lab in zip(self.right.index.levels,
self.right.index.labels)]
else:
right_keys = [self.right.index.values]
elif _any(self.right_on):
for k in self.right_on:
if is_rkey(k):
right_keys.append(k)
join_names.append(None)
else:
right_keys.append(right._get_label_or_level_values(k))
join_names.append(k)
if isinstance(self.left.index, MultiIndex):
left_keys = [lev._values.take(lab)
for lev, lab in zip(self.left.index.levels,
self.left.index.labels)]
else:
left_keys = [self.left.index.values]
if left_drop:
self.left = self.left._drop_labels_or_levels(left_drop)
if right_drop:
self.right = self.right._drop_labels_or_levels(right_drop)
return left_keys, right_keys, join_names
def _maybe_coerce_merge_keys(self):
# we have valid mergees but we may have to further
# coerce these if they are originally incompatible types
#
# for example if these are categorical, but are not dtype_equal
# or if we have object and integer dtypes
for lk, rk, name in zip(self.left_join_keys,
self.right_join_keys,
self.join_names):
if (len(lk) and not len(rk)) or (not len(lk) and len(rk)):
continue
lk_is_cat = is_categorical_dtype(lk)
rk_is_cat = is_categorical_dtype(rk)
# if either left or right is a categorical
            # then they must match exactly in categories & ordered
if lk_is_cat and rk_is_cat:
if lk.is_dtype_equal(rk):
continue
elif lk_is_cat or rk_is_cat:
pass
elif is_dtype_equal(lk.dtype, rk.dtype):
continue
msg = ("You are trying to merge on {lk_dtype} and "
"{rk_dtype} columns. If you wish to proceed "
"you should use pd.concat".format(lk_dtype=lk.dtype,
rk_dtype=rk.dtype))
# if we are numeric, then allow differing
# kinds to proceed, eg. int64 and int8, int and float
# further if we are object, but we infer to
# the same, then proceed
if is_numeric_dtype(lk) and is_numeric_dtype(rk):
if lk.dtype.kind == rk.dtype.kind:
pass
# check whether ints and floats
elif is_integer_dtype(rk) and is_float_dtype(lk):
if not (lk == lk.astype(rk.dtype))[~np.isnan(lk)].all():
warnings.warn('You are merging on int and float '
'columns where the float values '
'are not equal to their int '
'representation', UserWarning)
elif is_float_dtype(rk) and is_integer_dtype(lk):
if not (rk == rk.astype(lk.dtype))[~np.isnan(rk)].all():
warnings.warn('You are merging on int and float '
'columns where the float values '
'are not equal to their int '
'representation', UserWarning)
# let's infer and see if we are ok
elif lib.infer_dtype(lk) == lib.infer_dtype(rk):
pass
# Check if we are trying to merge on obviously
# incompatible dtypes GH 9780, GH 15800
# boolean values are considered as numeric, but are still allowed
# to be merged on object boolean values
elif ((is_numeric_dtype(lk) and not is_bool_dtype(lk))
and not is_numeric_dtype(rk)):
raise ValueError(msg)
elif (not is_numeric_dtype(lk)
and (is_numeric_dtype(rk) and not is_bool_dtype(rk))):
raise ValueError(msg)
elif is_datetimelike(lk) and not is_datetimelike(rk):
raise ValueError(msg)
elif not is_datetimelike(lk) and is_datetimelike(rk):
raise ValueError(msg)
elif is_datetime64tz_dtype(lk) and not is_datetime64tz_dtype(rk):
raise ValueError(msg)
elif not is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk):
raise ValueError(msg)
# Houston, we have a problem!
# let's coerce to object if the dtypes aren't
# categorical, otherwise coerce to the category
# dtype. If we coerced categories to object,
# then we would lose type information on some
# columns, and end up trying to merge
# incompatible dtypes. See GH 16900.
else:
if name in self.left.columns:
typ = lk.categories.dtype if lk_is_cat else object
self.left = self.left.assign(
**{name: self.left[name].astype(typ)})
if name in self.right.columns:
typ = rk.categories.dtype if rk_is_cat else object
self.right = self.right.assign(
**{name: self.right[name].astype(typ)})
def _validate_specification(self):
# Hm, any way to make this logic less complicated??
if self.on is None and self.left_on is None and self.right_on is None:
if self.left_index and self.right_index:
self.left_on, self.right_on = (), ()
elif self.left_index:
if self.right_on is None:
raise MergeError('Must pass right_on or right_index=True')
elif self.right_index:
if self.left_on is None:
raise MergeError('Must pass left_on or left_index=True')
else:
# use the common columns
common_cols = self.left.columns.intersection(
self.right.columns)
if len(common_cols) == 0:
raise MergeError(
'No common columns to perform merge on. '
'Merge options: left_on={lon}, right_on={ron}, '
'left_index={lidx}, right_index={ridx}'
.format(lon=self.left_on, ron=self.right_on,
lidx=self.left_index, ridx=self.right_index))
if not common_cols.is_unique:
raise MergeError("Data columns not unique: {common!r}"
.format(common=common_cols))
self.left_on = self.right_on = common_cols
elif self.on is not None:
if self.left_on is not None or self.right_on is not None:
raise MergeError('Can only pass argument "on" OR "left_on" '
'and "right_on", not a combination of both.')
self.left_on = self.right_on = self.on
elif self.left_on is not None:
n = len(self.left_on)
if self.right_index:
if len(self.left_on) != self.right.index.nlevels:
raise ValueError('len(left_on) must equal the number '
'of levels in the index of "right"')
self.right_on = [None] * n
elif self.right_on is not None:
n = len(self.right_on)
if self.left_index:
if len(self.right_on) != self.left.index.nlevels:
raise ValueError('len(right_on) must equal the number '
'of levels in the index of "left"')
self.left_on = [None] * n
if len(self.right_on) != len(self.left_on):
raise ValueError("len(right_on) must equal len(left_on)")
def _validate(self, validate):
# Check uniqueness of each
if self.left_index:
left_unique = self.orig_left.index.is_unique
else:
left_unique = MultiIndex.from_arrays(self.left_join_keys
).is_unique
if self.right_index:
right_unique = self.orig_right.index.is_unique
else:
right_unique = MultiIndex.from_arrays(self.right_join_keys
).is_unique
# Check data integrity
if validate in ["one_to_one", "1:1"]:
if not left_unique and not right_unique:
raise MergeError("Merge keys are not unique in either left"
" or right dataset; not a one-to-one merge")
elif not left_unique:
raise MergeError("Merge keys are not unique in left dataset;"
" not a one-to-one merge")
elif not right_unique:
raise MergeError("Merge keys are not unique in right dataset;"
" not a one-to-one merge")
elif validate in ["one_to_many", "1:m"]:
if not left_unique:
raise MergeError("Merge keys are not unique in left dataset;"
"not a one-to-many merge")
elif validate in ["many_to_one", "m:1"]:
if not right_unique:
raise MergeError("Merge keys are not unique in right dataset;"
" not a many-to-one merge")
elif validate in ['many_to_many', 'm:m']:
pass
else:
raise ValueError("Not a valid argument for validate")
def _get_join_indexers(left_keys, right_keys, sort=False, how='inner',
**kwargs):
"""
Parameters
----------
left_keys: ndarray, Index, Series
right_keys: ndarray, Index, Series
sort: boolean, default False
how: string {'inner', 'outer', 'left', 'right'}, default 'inner'
Returns
-------
tuple of (left_indexer, right_indexer)
indexers into the left_keys, right_keys
"""
from functools import partial
assert len(left_keys) == len(right_keys), \
'left_key and right_keys must be the same length'
# bind `sort` arg. of _factorize_keys
fkeys = partial(_factorize_keys, sort=sort)
# get left & right join labels and num. of levels at each location
llab, rlab, shape = map(list, zip(* map(fkeys, left_keys, right_keys)))
# get flat i8 keys from label lists
lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
# factorize keys to a dense i8 space
# `count` is the num. of unique keys
# set(lkey) | set(rkey) == range(count)
lkey, rkey, count = fkeys(lkey, rkey)
# preserve left frame order if how == 'left' and sort == False
kwargs = copy.copy(kwargs)
if how == 'left':
kwargs['sort'] = sort
join_func = _join_functions[how]
return join_func(lkey, rkey, count, **kwargs)
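# Hedged illustration of the helper above (not called anywhere): the returned
# arrays are positional indexers into the key arrays, so every matched pair
# points at equal keys. Input values are made up for the example.
def _join_indexers_example():
    left_key = np.array([0, 1, 2, 1], dtype=np.int64)
    right_key = np.array([1, 2, 2], dtype=np.int64)
    lidx, ridx = _get_join_indexers([left_key], [right_key], how='inner')
    assert (left_key[lidx] == right_key[ridx]).all()
    return lidx, ridx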
class _OrderedMerge(_MergeOperation):
_merge_type = 'ordered_merge'
def __init__(self, left, right, on=None, left_on=None, right_on=None,
left_index=False, right_index=False, axis=1,
suffixes=('_x', '_y'), copy=True,
fill_method=None, how='outer'):
self.fill_method = fill_method
_MergeOperation.__init__(self, left, right, on=on, left_on=left_on,
left_index=left_index,
right_index=right_index,
right_on=right_on, axis=axis,
how=how, suffixes=suffixes,
sort=True # factorize sorts
)
def get_result(self):
join_index, left_indexer, right_indexer = self._get_join_info()
# this is a bit kludgy
ldata, rdata = self.left._data, self.right._data
lsuf, rsuf = self.suffixes
llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
rdata.items, rsuf)
if self.fill_method == 'ffill':
left_join_indexer = libjoin.ffill_indexer(left_indexer)
right_join_indexer = libjoin.ffill_indexer(right_indexer)
else:
left_join_indexer = left_indexer
right_join_indexer = right_indexer
lindexers = {
1: left_join_indexer} if left_join_indexer is not None else {}
rindexers = {
1: right_join_indexer} if right_join_indexer is not None else {}
result_data = concatenate_block_managers(
[(ldata, lindexers), (rdata, rindexers)],
axes=[llabels.append(rlabels), join_index],
concat_axis=0, copy=self.copy)
typ = self.left._constructor
result = typ(result_data).__finalize__(self, method=self._merge_type)
self._maybe_add_join_keys(result, left_indexer, right_indexer)
return result
def _asof_function(direction):
name = 'asof_join_{dir}'.format(dir=direction)
return getattr(libjoin, name, None)
def _asof_by_function(direction):
name = 'asof_join_{dir}_on_X_by_Y'.format(dir=direction)
return getattr(libjoin, name, None)
_type_casters = {
'int64_t': ensure_int64,
'double': ensure_float64,
'object': ensure_object,
}
def _get_cython_type_upcast(dtype):
""" Upcast a dtype to 'int64_t', 'double', or 'object' """
if is_integer_dtype(dtype):
return 'int64_t'
elif is_float_dtype(dtype):
return 'double'
else:
return 'object'
class _AsOfMerge(_OrderedMerge):
_merge_type = 'asof_merge'
def __init__(self, left, right, on=None, left_on=None, right_on=None,
left_index=False, right_index=False,
by=None, left_by=None, right_by=None,
axis=1, suffixes=('_x', '_y'), copy=True,
fill_method=None,
how='asof', tolerance=None,
allow_exact_matches=True,
direction='backward'):
self.by = by
self.left_by = left_by
self.right_by = right_by
self.tolerance = tolerance
self.allow_exact_matches = allow_exact_matches
self.direction = direction
_OrderedMerge.__init__(self, left, right, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, axis=axis,
how=how, suffixes=suffixes,
fill_method=fill_method)
def _validate_specification(self):
super(_AsOfMerge, self)._validate_specification()
        # we only allow 'on' to be a single key for asof joins
if len(self.left_on) != 1 and not self.left_index:
raise MergeError("can only asof on a key for left")
if len(self.right_on) != 1 and not self.right_index:
raise MergeError("can only asof on a key for right")
if self.left_index and isinstance(self.left.index, MultiIndex):
raise MergeError("left can only have one index")
if self.right_index and isinstance(self.right.index, MultiIndex):
raise MergeError("right can only have one index")
# set 'by' columns
if self.by is not None:
if self.left_by is not None or self.right_by is not None:
raise MergeError('Can only pass by OR left_by '
'and right_by')
self.left_by = self.right_by = self.by
if self.left_by is None and self.right_by is not None:
raise MergeError('missing left_by')
if self.left_by is not None and self.right_by is None:
raise MergeError('missing right_by')
# add 'by' to our key-list so we can have it in the
# output as a key
if self.left_by is not None:
if not is_list_like(self.left_by):
self.left_by = [self.left_by]
if not is_list_like(self.right_by):
self.right_by = [self.right_by]
if len(self.left_by) != len(self.right_by):
raise MergeError('left_by and right_by must be same length')
self.left_on = self.left_by + list(self.left_on)
self.right_on = self.right_by + list(self.right_on)
# check 'direction' is valid
if self.direction not in ['backward', 'forward', 'nearest']:
raise MergeError('direction invalid: {direction}'
.format(direction=self.direction))
@property
def _asof_key(self):
""" This is our asof key, the 'on' """
return self.left_on[-1]
def _get_merge_keys(self):
# note this function has side effects
(left_join_keys,
right_join_keys,
join_names) = super(_AsOfMerge, self)._get_merge_keys()
# validate index types are the same
for i, (lk, rk) in enumerate(zip(left_join_keys, right_join_keys)):
if not is_dtype_equal(lk.dtype, rk.dtype):
raise MergeError("incompatible merge keys [{i}] {lkdtype} and "
"{rkdtype}, must be the same type"
.format(i=i, lkdtype=lk.dtype,
rkdtype=rk.dtype))
# validate tolerance; must be a Timedelta if we have a DTI
if self.tolerance is not None:
if self.left_index:
lt = self.left.index
else:
lt = left_join_keys[-1]
msg = ("incompatible tolerance {tolerance}, must be compat "
"with type {lkdtype}".format(
tolerance=type(self.tolerance),
lkdtype=lt.dtype))
if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt):
if not isinstance(self.tolerance, Timedelta):
raise MergeError(msg)
if self.tolerance < Timedelta(0):
raise MergeError("tolerance must be positive")
elif is_int64_dtype(lt):
if not is_integer(self.tolerance):
raise MergeError(msg)
if self.tolerance < 0:
raise MergeError("tolerance must be positive")
elif is_float_dtype(lt):
if not is_number(self.tolerance):
raise MergeError(msg)
if self.tolerance < 0:
raise MergeError("tolerance must be positive")
else:
raise MergeError("key must be integer, timestamp or float")
# validate allow_exact_matches
if not is_bool(self.allow_exact_matches):
msg = "allow_exact_matches must be boolean, passed {passed}"
raise MergeError(msg.format(passed=self.allow_exact_matches))
return left_join_keys, right_join_keys, join_names
def _get_join_indexers(self):
""" return the join indexers """
def flip(xs):
""" unlike np.transpose, this returns an array of tuples """
labels = list(string.ascii_lowercase[:len(xs)])
dtypes = [x.dtype for x in xs]
labeled_dtypes = list(zip(labels, dtypes))
return np.array(lzip(*xs), labeled_dtypes)
# values to compare
left_values = (self.left.index.values if self.left_index else
self.left_join_keys[-1])
right_values = (self.right.index.values if self.right_index else
self.right_join_keys[-1])
tolerance = self.tolerance
# we require sortedness and non-null values in the join keys
msg_sorted = "{side} keys must be sorted"
msg_missings = "Merge keys contain null values on {side} side"
if not Index(left_values).is_monotonic:
if isnull(left_values).any():
raise ValueError(msg_missings.format(side='left'))
else:
raise ValueError(msg_sorted.format(side='left'))
if not Index(right_values).is_monotonic:
if isnull(right_values).any():
raise ValueError(msg_missings.format(side='right'))
else:
raise ValueError(msg_sorted.format(side='right'))
# initial type conversion as needed
if needs_i8_conversion(left_values):
left_values = left_values.view('i8')
right_values = right_values.view('i8')
if tolerance is not None:
tolerance = tolerance.value
# a "by" parameter requires special handling
if self.left_by is not None:
# remove 'on' parameter from values if one existed
if self.left_index and self.right_index:
left_by_values = self.left_join_keys
right_by_values = self.right_join_keys
else:
left_by_values = self.left_join_keys[0:-1]
right_by_values = self.right_join_keys[0:-1]
# get tuple representation of values if more than one
if len(left_by_values) == 1:
left_by_values = left_by_values[0]
right_by_values = right_by_values[0]
else:
left_by_values = flip(left_by_values)
right_by_values = flip(right_by_values)
# upcast 'by' parameter because HashTable is limited
by_type = _get_cython_type_upcast(left_by_values.dtype)
by_type_caster = _type_casters[by_type]
left_by_values = by_type_caster(left_by_values)
right_by_values = by_type_caster(right_by_values)
# choose appropriate function by type
func = _asof_by_function(self.direction)
return func(left_values,
right_values,
left_by_values,
right_by_values,
self.allow_exact_matches,
tolerance)
else:
# choose appropriate function by type
func = _asof_function(self.direction)
return func(left_values,
right_values,
self.allow_exact_matches,
tolerance)
def _get_multiindex_indexer(join_keys, index, sort):
from functools import partial
# bind `sort` argument
fkeys = partial(_factorize_keys, sort=sort)
# left & right join labels and num. of levels at each location
rlab, llab, shape = map(list, zip(* map(fkeys, index.levels, join_keys)))
if sort:
rlab = list(map(np.take, rlab, index.labels))
else:
i8copy = lambda a: a.astype('i8', subok=False, copy=True)
rlab = list(map(i8copy, index.labels))
# fix right labels if there were any nulls
for i in range(len(join_keys)):
mask = index.labels[i] == -1
if mask.any():
# check if there already was any nulls at this location
# if there was, it is factorized to `shape[i] - 1`
a = join_keys[i][llab[i] == shape[i] - 1]
if a.size == 0 or not a[0] != a[0]:
shape[i] += 1
rlab[i][mask] = shape[i] - 1
# get flat i8 join keys
lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
# factorize keys to a dense i8 space
lkey, rkey, count = fkeys(lkey, rkey)
return libjoin.left_outer_join(lkey, rkey, count, sort=sort)
def _get_single_indexer(join_key, index, sort=False):
left_key, right_key, count = _factorize_keys(join_key, index, sort=sort)
left_indexer, right_indexer = libjoin.left_outer_join(
ensure_int64(left_key),
ensure_int64(right_key),
count, sort=sort)
return left_indexer, right_indexer
def _left_join_on_index(left_ax, right_ax, join_keys, sort=False):
if len(join_keys) > 1:
if not ((isinstance(right_ax, MultiIndex) and
len(join_keys) == right_ax.nlevels)):
raise AssertionError("If more than one join key is given then "
"'right_ax' must be a MultiIndex and the "
"number of join keys must be the number of "
"levels in right_ax")
left_indexer, right_indexer = \
_get_multiindex_indexer(join_keys, right_ax, sort=sort)
else:
jkey = join_keys[0]
left_indexer, right_indexer = \
_get_single_indexer(jkey, right_ax, sort=sort)
if sort or len(left_ax) != len(left_indexer):
# if asked to sort or there are 1-to-many matches
join_index = left_ax.take(left_indexer)
return join_index, left_indexer, right_indexer
# left frame preserves order & length of its index
return left_ax, None, right_indexer
def _right_outer_join(x, y, max_groups):
right_indexer, left_indexer = libjoin.left_outer_join(y, x, max_groups)
return left_indexer, right_indexer
_join_functions = {
'inner': libjoin.inner_join,
'left': libjoin.left_outer_join,
'right': _right_outer_join,
'outer': libjoin.full_outer_join,
}
def _factorize_keys(lk, rk, sort=True):
if is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk):
lk = lk.values
rk = rk.values
# if we exactly match in categories, allow us to factorize on codes
if (is_categorical_dtype(lk) and
is_categorical_dtype(rk) and
lk.is_dtype_equal(rk)):
klass = libhashtable.Int64Factorizer
if lk.categories.equals(rk.categories):
rk = rk.codes
else:
# Same categories in different orders -> recode
rk = _recode_for_categories(rk.codes, rk.categories, lk.categories)
lk = ensure_int64(lk.codes)
rk = ensure_int64(rk)
elif is_int_or_datetime_dtype(lk) and is_int_or_datetime_dtype(rk):
klass = libhashtable.Int64Factorizer
lk = ensure_int64(com.values_from_object(lk))
rk = ensure_int64(com.values_from_object(rk))
else:
klass = libhashtable.Factorizer
lk = ensure_object(lk)
rk = ensure_object(rk)
rizer = klass(max(len(lk), len(rk)))
llab = rizer.factorize(lk)
rlab = rizer.factorize(rk)
count = rizer.get_count()
if sort:
uniques = rizer.uniques.to_array()
llab, rlab = _sort_labels(uniques, llab, rlab)
# NA group
lmask = llab == -1
lany = lmask.any()
rmask = rlab == -1
rany = rmask.any()
if lany or rany:
if lany:
np.putmask(llab, lmask, count)
if rany:
np.putmask(rlab, rmask, count)
count += 1
return llab, rlab, count
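# Hedged sketch of the factorization above (illustrative only): both sides are
# mapped into one dense integer label space and `count` is the number of
# distinct keys seen across the two arrays ('a', 'b', 'c' -> 3 here).
def _factorize_keys_example():
    llab, rlab, count = _factorize_keys(np.array(['a', 'b', 'a'], dtype=object),
                                        np.array(['b', 'c'], dtype=object))
    assert count == 3
    return llab, rlab, count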
def _sort_labels(uniques, left, right):
if not isinstance(uniques, np.ndarray):
# tuplesafe
uniques = Index(uniques).values
llength = len(left)
labels = np.concatenate([left, right])
_, new_labels = sorting.safe_sort(uniques, labels, na_sentinel=-1)
new_labels = ensure_int64(new_labels)
new_left, new_right = new_labels[:llength], new_labels[llength:]
return new_left, new_right
def _get_join_keys(llab, rlab, shape, sort):
# how many levels can be done without overflow
pred = lambda i: not is_int64_overflow_possible(shape[:i])
nlev = next(filter(pred, range(len(shape), 0, -1)))
# get keys for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype='i8')
lkey = stride * llab[0].astype('i8', subok=False, copy=False)
rkey = stride * rlab[0].astype('i8', subok=False, copy=False)
for i in range(1, nlev):
with np.errstate(divide='ignore'):
stride //= shape[i]
lkey += llab[i] * stride
rkey += rlab[i] * stride
if nlev == len(shape): # all done!
return lkey, rkey
# densify current keys to avoid overflow
lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort)
llab = [lkey] + llab[nlev:]
rlab = [rkey] + rlab[nlev:]
shape = [count] + shape[nlev:]
return _get_join_keys(llab, rlab, shape, sort)
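# Hedged numeric check of the packing above (illustrative only): with
# shape == [3, 4] and no overflow risk, the combined key for labels (i, j)
# is simply i * 4 + j.
def _get_join_keys_example():
    llab = [np.array([0, 1, 2], dtype='i8'), np.array([3, 0, 1], dtype='i8')]
    rlab = [np.array([2, 2], dtype='i8'), np.array([1, 3], dtype='i8')]
    lkey, rkey = _get_join_keys(llab, rlab, [3, 4], sort=False)
    assert list(lkey) == [3, 4, 9] and list(rkey) == [9, 11]
    return lkey, rkey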
def _should_fill(lname, rname):
if (not isinstance(lname, compat.string_types) or
not isinstance(rname, compat.string_types)):
return True
return lname == rname
def _any(x):
return x is not None and com._any_not_none(*x)
def validate_operand(obj):
if isinstance(obj, DataFrame):
return obj
elif isinstance(obj, Series):
if obj.name is None:
raise ValueError('Cannot merge a Series without a name')
else:
return obj.to_frame()
else:
raise TypeError('Can only merge Series or DataFrame objects, '
'a {obj} was passed'.format(obj=type(obj)))
| bsd-3-clause |
wwf5067/statsmodels | statsmodels/sandbox/stats/stats_dhuard.py | 33 | 10184 | '''
from David Huard's scipy sandbox, also attached to a ticket and
in the matplotlib-user mailinglist (links ???)
Notes
=====
out of bounds interpolation raises exception and wouldn't be completely
defined ::
>>> scoreatpercentile(x, [0,25,50,100])
Traceback (most recent call last):
...
raise ValueError("A value in x_new is below the interpolation "
ValueError: A value in x_new is below the interpolation range.
>>> percentileofscore(x, [-50, 50])
Traceback (most recent call last):
...
raise ValueError("A value in x_new is below the interpolation "
ValueError: A value in x_new is below the interpolation range.
idea
====
histogram and empirical interpolated distribution
-------------------------------------------------
dual constructor
* empirical cdf : cdf on all observations through linear interpolation
* binned cdf : based on histogram
both should work essentially the same, although pdf of empirical has
many spikes, fluctuates a lot
- alternative: binning based on interpolated cdf : example in script
* ppf: quantileatscore based on interpolated cdf
* rvs : generic from ppf
* stats, expectation ? how does integration wrt cdf work - theory?
Problems
* limits, lower and upper bound of support
does not work or is undefined with empirical cdf and interpolation
* extending bounds ?
matlab has pareto tails for empirical distribution, breaks linearity
empirical distribution with higher order interpolation
------------------------------------------------------
* should work easily enough with interpolating splines
* not piecewise linear
* can use pareto (or other) tails
* ppf how do I get the inverse function of a higher order spline?
Chuck: resample and fit spline to inverse function
this will have an approximation error in the inverse function
* -> doesn't work: higher order spline doesn't preserve monotonicity
see mailing list for response to my question
* pmf from derivative available in spline
-> forget this and use kernel density estimator instead
bootstrap/empirical distribution:
---------------------------------
discrete distribution on real line given observations
what's defined?
* cdf : step function
* pmf : points with equal weight 1/nobs
* rvs : resampling
* ppf : quantileatscore on sample?
* moments : from data ?
* expectation ? sum_{all observations x} [func(x) * pmf(x)]
* similar for discrete distribution on real line
* References : ?
* what's the point? most of it is trivial, just for the record ?
Created on Monday, May 03, 2010, 11:47:03 AM
Author: josef-pktd, parts based on David Huard
License: BSD
'''
from __future__ import print_function
import scipy.interpolate as interpolate
import numpy as np
def scoreatpercentile(data, percentile):
"""Return the score at the given percentile of the data.
Example:
>>> data = randn(100)
>>> scoreatpercentile(data, 50)
will return the median of sample `data`.
"""
per = np.array(percentile)
cdf = empiricalcdf(data)
interpolator = interpolate.interp1d(np.sort(cdf), np.sort(data))
return interpolator(per/100.)
def percentileofscore(data, score):
"""Return the percentile-position of score relative to data.
score: Array of scores at which the percentile is computed.
Return percentiles (0-100).
Example
r = randn(50)
x = linspace(-2,2,100)
percentileofscore(r,x)
Raise an error if the score is outside the range of data.
"""
cdf = empiricalcdf(data)
interpolator = interpolate.interp1d(np.sort(data), np.sort(cdf))
return interpolator(score)*100.
def empiricalcdf(data, method='Hazen'):
"""Return the empirical cdf.
Methods available:
Hazen: (i-0.5)/N
Weibull: i/(N+1)
Chegodayev: (i-.3)/(N+.4)
Cunnane: (i-.4)/(N+.2)
Gringorten: (i-.44)/(N+.12)
California: (i-1)/N
Where i goes from 1 to N.
"""
i = np.argsort(np.argsort(data)) + 1.
N = len(data)
method = method.lower()
if method == 'hazen':
cdf = (i-0.5)/N
elif method == 'weibull':
cdf = i/(N+1.)
elif method == 'california':
cdf = (i-1.)/N
elif method == 'chegodayev':
cdf = (i-.3)/(N+.4)
elif method == 'cunnane':
cdf = (i-.4)/(N+.2)
elif method == 'gringorten':
cdf = (i-.44)/(N+.12)
else:
        raise ValueError('Unknown method. Choose among Weibull, Hazen, '
'Chegodayev, Cunnane, Gringorten and California.')
return cdf
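# Hedged worked example of the plotting positions above (illustrative only):
# for five observations the Hazen positions are (i - 0.5) / 5, i = 1..5.
def _empiricalcdf_example():
    x = np.array([10., 20., 30., 40., 50.])
    cdf = empiricalcdf(x)            # Hazen by default
    assert np.allclose(cdf, [0.1, 0.3, 0.5, 0.7, 0.9])
    return cdf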
class HistDist(object):
'''Distribution with piecewise linear cdf, pdf is step function
    can be created from an empirical distribution or from a histogram (not done yet)
work in progress, not finished
'''
def __init__(self, data):
self.data = np.atleast_1d(data)
self.binlimit = np.array([self.data.min(), self.data.max()])
sortind = np.argsort(data)
self._datasorted = data[sortind]
        self.ranking = np.argsort(sortind) + 1.  # 1-based ranks, matching the plotting-position formulas
cdf = self.empiricalcdf()
self._empcdfsorted = np.sort(cdf)
self.cdfintp = interpolate.interp1d(self._datasorted, self._empcdfsorted)
self.ppfintp = interpolate.interp1d(self._empcdfsorted, self._datasorted)
def empiricalcdf(self, data=None, method='Hazen'):
"""Return the empirical cdf.
Methods available:
Hazen: (i-0.5)/N
Weibull: i/(N+1)
Chegodayev: (i-.3)/(N+.4)
Cunnane: (i-.4)/(N+.2)
Gringorten: (i-.44)/(N+.12)
California: (i-1)/N
Where i goes from 1 to N.
"""
if data is None:
data = self.data
i = self.ranking
else:
i = np.argsort(np.argsort(data)) + 1.
N = len(data)
method = method.lower()
if method == 'hazen':
cdf = (i-0.5)/N
elif method == 'weibull':
cdf = i/(N+1.)
elif method == 'california':
cdf = (i-1.)/N
elif method == 'chegodayev':
cdf = (i-.3)/(N+.4)
elif method == 'cunnane':
cdf = (i-.4)/(N+.2)
elif method == 'gringorten':
cdf = (i-.44)/(N+.12)
else:
            raise ValueError('Unknown method. Choose among Weibull, Hazen, '
'Chegodayev, Cunnane, Gringorten and California.')
return cdf
def cdf_emp(self, score):
'''
this is score in dh
'''
return self.cdfintp(score)
#return percentileofscore(self.data, score)
def ppf_emp(self, quantile):
'''
this is score in dh
'''
return self.ppfintp(quantile)
#return scoreatpercentile(self.data, quantile*100)
#from DHuard http://old.nabble.com/matplotlib-f2903.html
def optimize_binning(self, method='Freedman'):
"""Find the optimal number of bins and update the bin countaccordingly.
Available methods : Freedman
Scott
"""
nobs = len(self.data)
if method=='Freedman':
            IQR = self.ppf_emp(0.75) - self.ppf_emp(0.25) # Interquartile range (75% - 25%)
width = 2* IQR* nobs**(-1./3)
elif method=='Scott':
width = 3.49 * np.std(self.data) * nobs**(-1./3)
self.nbin = (self.binlimit.ptp()/width)
return self.nbin
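# Hedged usage sketch of the class above (illustrative only): build the
# interpolated empirical distribution from a sample and query cdf/ppf; the
# sample size and seed are arbitrary.
def _histdist_example():
    rng = np.random.RandomState(0)
    h = HistDist(rng.normal(size=200))
    quartiles = h.ppf_emp([0.25, 0.5, 0.75])
    probs = h.cdf_emp(quartiles)     # approximately [0.25, 0.5, 0.75]
    return quartiles, probs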
#changes: josef-pktd
if __name__ == '__main__':
import matplotlib.pyplot as plt
nobs = 100
x = np.random.randn(nobs)
examples = [2]
if 1 in examples:
empiricalcdf(x)
print(percentileofscore(x, 0.5))
print(scoreatpercentile(x, 50))
import matplotlib.pyplot as plt
xsupp = np.linspace(x.min(), x.max())
pos = percentileofscore(x, xsupp)
plt.plot(xsupp, pos)
#perc = np.linspace(2.5, 97.5)
#plt.plot(scoreatpercentile(x, perc), perc)
plt.plot(scoreatpercentile(x, pos), pos+1)
#emp = interpolate.PiecewisePolynomial(np.sort(empiricalcdf(x)), np.sort(x))
emp=interpolate.InterpolatedUnivariateSpline(np.sort(x),np.sort(empiricalcdf(x)),k=1)
pdfemp = np.array([emp.derivatives(xi)[1] for xi in xsupp])
plt.figure()
plt.plot(xsupp,pdfemp)
cdf_ongrid = emp(xsupp)
plt.figure()
plt.plot(xsupp, cdf_ongrid)
#get pdf from interpolated cdf on a regular grid
plt.figure()
plt.step(xsupp[:-1],np.diff(cdf_ongrid)/np.diff(xsupp))
#reduce number of bins/steps
xsupp2 = np.linspace(x.min(), x.max(), 25)
plt.figure()
plt.step(xsupp2[:-1],np.diff(emp(xsupp2))/np.diff(xsupp2))
#pdf using 25 original observations, every (nobs/25)th
xso = np.sort(x)
        xs = xso[::nobs // 25]
plt.figure()
plt.step(xs[:-1],np.diff(emp(xs))/np.diff(xs))
#lower end looks strange
histd = HistDist(x)
print(histd.optimize_binning())
print(histd.cdf_emp(histd.binlimit))
print(histd.ppf_emp([0.25, 0.5, 0.75]))
print(histd.cdf_emp([-0.5, -0.25, 0, 0.25, 0.5]))
xsupp = np.linspace(x.min(), x.max(), 500)
emp=interpolate.InterpolatedUnivariateSpline(np.sort(x),np.sort(empiricalcdf(x)),k=1)
#pdfemp = np.array([emp.derivatives(xi)[1] for xi in xsupp])
#plt.figure()
#plt.plot(xsupp,pdfemp)
cdf_ongrid = emp(xsupp)
plt.figure()
plt.plot(xsupp, cdf_ongrid)
ppfintp = interpolate.InterpolatedUnivariateSpline(cdf_ongrid,xsupp,k=3)
ppfs = ppfintp(cdf_ongrid)
plt.plot(ppfs, cdf_ongrid)
#ppfemp=interpolate.InterpolatedUnivariateSpline(np.sort(empiricalcdf(x)),np.sort(x),k=3)
#Don't use interpolating splines for function approximation
#with s=0.03 the spline is monotonic at the evaluated values
ppfemp=interpolate.UnivariateSpline(np.sort(empiricalcdf(x)),np.sort(x),k=3, s=0.03)
ppfe = ppfemp(cdf_ongrid)
plt.plot(ppfe, cdf_ongrid)
print('negative density')
print('(np.diff(ppfs)).min()', (np.diff(ppfs)).min())
print('(np.diff(cdf_ongrid)).min()', (np.diff(cdf_ongrid)).min())
#plt.show()
| bsd-3-clause |
bert9bert/statsmodels | statsmodels/sandbox/tests/test_predict_functional.py | 29 | 12873 | from statsmodels.sandbox.predict_functional import predict_functional
import numpy as np
import pandas as pd
import statsmodels.api as sm
from numpy.testing import dec
# If true, the output is written to a multi-page pdf file.
pdf_output = False
try:
import matplotlib.pyplot as plt
import matplotlib
have_matplotlib = True
except ImportError:
have_matplotlib = False
def pctl(q):
return lambda x : np.percentile(x, 100 *q)
class TestPredFunc(object):
@classmethod
def setup_class(cls):
if pdf_output:
from matplotlib.backends.backend_pdf import PdfPages
cls.pdf = PdfPages("predict_functional.pdf")
@classmethod
def teardown_class(cls):
if pdf_output:
cls.pdf.close()
def close_or_save(self, fig):
if pdf_output:
self.pdf.savefig(fig)
else:
plt.close(fig)
@dec.skipif(not have_matplotlib)
def test_formula(self):
np.random.seed(542)
n = 500
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
x3 = np.random.normal(size=n)
x4 = np.random.randint(0, 5, size=n)
x4 = np.asarray(["ABCDE"[i] for i in x4])
x5 = np.random.normal(size=n)
y = 0.3*x2**2 + (x4 == "B") + 0.1*(x4 == "B")*x2**2 + x5 + np.random.normal(size=n)
df = pd.DataFrame({"y": y, "x1": x1, "x2": x2, "x3": x3, "x4": x4, "x5": x5})
fml = "y ~ x1 + bs(x2, df=4) + x3 + x2*x3 + I(x1**2) + C(x4) + C(x4)*bs(x2, df=4) + x5"
model = sm.OLS.from_formula(fml, data=df)
result = model.fit()
summaries = {"x1": np.mean, "x3": pctl(0.75), "x5": np.mean}
values = {"x4": "B"}
pr1, ci1, fvals1 = predict_functional(result, "x2", summaries, values)
values = {"x4": "C"}
pr2, ci2, fvals2 = predict_functional(result, "x2", summaries, values)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x4=B')
plt.plot(fvals2, pr2, '-', label='x4=C')
ha, lb = ax.get_legend_handles_labels()
plt.figlegend(ha, lb, "center right")
plt.xlabel("Focus variable", size=15)
plt.ylabel("Fitted mean", size=15)
plt.title("Linear model prediction")
self.close_or_save(fig)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x4=B')
plt.fill_between(fvals1, ci1[:, 0], ci1[:, 1], color='grey')
plt.plot(fvals2, pr2, '-', label='x4=C')
plt.fill_between(fvals2, ci2[:, 0], ci2[:, 1], color='grey')
ha, lb = ax.get_legend_handles_labels()
plt.figlegend(ha, lb, "center right")
plt.xlabel("Focus variable", size=15)
plt.ylabel("Fitted mean", size=15)
plt.title("Linear model prediction")
self.close_or_save(fig)
@dec.skipif(not have_matplotlib)
def test_lm_contrast(self):
np.random.seed(542)
n = 200
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
x3 = np.random.normal(size=n)
y = x1 + 2*x2 + x3 - x1*x2 + x2*x3 + np.random.normal(size=n)
df = pd.DataFrame({"y": y, "x1": x1, "x2": x2, "x3": x3})
fml = "y ~ x1 + x2 + x3 + x1*x2 + x2*x3"
model = sm.OLS.from_formula(fml, data=df)
result = model.fit()
values = {"x2": 1, "x3": 1} # y = 4
values2 = {"x2": 0, "x3": 0} # y = x1
pr, cb, fvals = predict_functional(result, "x1", values=values,
values2=values2, ci_method='scheffe')
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.67, 0.8])
plt.plot(fvals, pr, '-', label="Estimate", color='orange', lw=4)
plt.plot(fvals, 4 - fvals, '-', label="Truth", color='lime', lw=4)
plt.fill_between(fvals, cb[:, 0], cb[:, 1], color='grey')
ha, lb = ax.get_legend_handles_labels()
leg = plt.figlegend(ha, lb, "center right")
leg.draw_frame(False)
plt.xlabel("Focus variable", size=15)
plt.ylabel("Mean contrast", size=15)
plt.title("Linear model contrast")
self.close_or_save(fig)
@dec.skipif(not have_matplotlib)
def test_glm_formula_contrast(self):
np.random.seed(542)
n = 50
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
x3 = np.random.normal(size=n)
mn = 5 + 0.1*x1 + 0.1*x2 + 0.1*x3 - 0.1*x1*x2
y = np.random.poisson(np.exp(mn), size=len(mn))
df = pd.DataFrame({"y": y, "x1": x1, "x2": x2, "x3": x3})
fml = "y ~ x1 + x2 + x3 + x1*x2"
model = sm.GLM.from_formula(fml, data=df, family=sm.families.Poisson())
result = model.fit()
values = {"x2": 1, "x3": 1} # y = 5.2
values2 = {"x2": 0, "x3": 0} # y = 5 + 0.1*x1
pr, cb, fvals = predict_functional(result, "x1", values=values,
values2=values2, ci_method='simultaneous')
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.67, 0.8])
plt.plot(fvals, pr, '-', label="Estimate", color='orange', lw=4)
plt.plot(fvals, 0.2 - 0.1*fvals, '-', label="Truth", color='lime', lw=4)
plt.fill_between(fvals, cb[:, 0], cb[:, 1], color='grey')
ha, lb = ax.get_legend_handles_labels()
leg = plt.figlegend(ha, lb, "center right")
leg.draw_frame(False)
plt.xlabel("Focus variable", size=15)
plt.ylabel("Linear predictor contrast", size=15)
plt.title("Poisson regression contrast")
self.close_or_save(fig)
@dec.skipif(not have_matplotlib)
def test_scb(self):
np.random.seed(473)
n = 100
x = np.random.normal(size=(n,4))
x[:, 0] = 1
for fam_name in "poisson", "binomial", "gaussian":
if fam_name == "poisson":
y = np.random.poisson(20, size=n)
fam = sm.families.Poisson()
true_mean = 20
true_lp = np.log(20)
elif fam_name == "binomial":
y = 1 * (np.random.uniform(size=n) < 0.5)
fam = sm.families.Binomial()
true_mean = 0.5
true_lp = 0
elif fam_name == "gaussian":
y = np.random.normal(size=n)
fam = sm.families.Gaussian()
true_mean = 0
true_lp = 0
model = sm.GLM(y, x, family=fam)
result = model.fit()
# CB is for linear predictor or mean response
for linear in False, True:
true = true_lp if linear else true_mean
values = {'const': 1, "x2": 0}
summaries = {"x3": np.mean}
pred1, cb1, fvals1 = predict_functional(result, "x1",
values=values, summaries=summaries, linear=linear)
pred2, cb2, fvals2 = predict_functional(result, "x1",
values=values, summaries=summaries,
ci_method='simultaneous', linear=linear)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.58, 0.8])
plt.plot(fvals1, pred1, '-', color='black', label='Estimate')
plt.plot(fvals1, true * np.ones(len(pred1)), '-', color='purple',
label='Truth')
plt.plot(fvals1, cb1[:, 0], color='blue', label='Pointwise CB')
plt.plot(fvals1, cb1[:, 1], color='blue')
plt.plot(fvals2, cb2[:, 0], color='green', label='Simultaneous CB')
plt.plot(fvals2, cb2[:, 1], color='green')
ha, lb = ax.get_legend_handles_labels()
leg = plt.figlegend(ha, lb, "center right")
leg.draw_frame(False)
plt.xlabel("Focus variable", size=15)
if linear:
plt.ylabel("Linear predictor", size=15)
else:
plt.ylabel("Fitted mean", size=15)
plt.title("%s family prediction" % fam_name.capitalize())
self.close_or_save(fig)
@dec.skipif(not have_matplotlib)
def test_glm_formula(self):
np.random.seed(542)
n = 500
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
x3 = np.random.randint(0, 3, size=n)
x3 = np.asarray(["ABC"[i] for i in x3])
lin_pred = -1 + 0.5*x1**2 + (x3 == "B")
prob = 1 / (1 + np.exp(-lin_pred))
y = 1 * (np.random.uniform(size=n) < prob)
df = pd.DataFrame({"y": y, "x1": x1, "x2": x2, "x3": x3})
fml = "y ~ x1 + I(x1**2) + x2 + C(x3)"
model = sm.GLM.from_formula(fml, family=sm.families.Binomial(), data=df)
result = model.fit()
summaries = {"x2": np.mean}
for linear in False, True:
values = {"x3": "B"}
pr1, ci1, fvals1 = predict_functional(result, "x1", summaries, values, linear=linear)
values = {"x3": "C"}
pr2, ci2, fvals2 = predict_functional(result, "x1", summaries, values, linear=linear)
exact1 = -1 + 0.5*fvals1**2 + 1
exact2 = -1 + 0.5*fvals2**2
if not linear:
exact1 = 1 / (1 + np.exp(-exact1))
exact2 = 1 / (1 + np.exp(-exact2))
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x3=B')
plt.plot(fvals2, pr2, '-', label='x3=C')
plt.plot(fvals1, exact1, '-', label='x3=B (exact)')
plt.plot(fvals2, exact2, '-', label='x3=C (exact)')
ha, lb = ax.get_legend_handles_labels()
plt.figlegend(ha, lb, "center right")
plt.xlabel("Focus variable", size=15)
if linear:
plt.ylabel("Fitted linear predictor", size=15)
else:
plt.ylabel("Fitted probability", size=15)
plt.title("Binomial GLM prediction")
self.close_or_save(fig)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x3=B', color='orange')
plt.fill_between(fvals1, ci1[:, 0], ci1[:, 1], color='grey')
plt.plot(fvals2, pr2, '-', label='x3=C', color='lime')
plt.fill_between(fvals2, ci2[:, 0], ci2[:, 1], color='grey')
ha, lb = ax.get_legend_handles_labels()
plt.figlegend(ha, lb, "center right")
plt.xlabel("Focus variable", size=15)
if linear:
plt.ylabel("Fitted linear predictor", size=15)
else:
plt.ylabel("Fitted probability", size=15)
plt.title("Binomial GLM prediction")
self.close_or_save(fig)
@dec.skipif(not have_matplotlib)
def test_noformula_prediction(self):
np.random.seed(6434)
n = 200
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
x3 = np.random.normal(size=n)
y = x1 - x2 + np.random.normal(size=n)
exog = np.vstack((x1, x2, x3)).T
model = sm.OLS(y, exog)
result = model.fit()
summaries = {"x3": pctl(0.75)}
values = {"x2": 1}
pr1, ci1, fvals1 = predict_functional(result, "x1", summaries, values)
values = {"x2": -1}
pr2, ci2, fvals2 = predict_functional(result, "x1", summaries, values)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x2=1', lw=4, alpha=0.6, color='orange')
plt.plot(fvals2, pr2, '-', label='x2=-1', lw=4, alpha=0.6, color='lime')
ha, lb = ax.get_legend_handles_labels()
leg = plt.figlegend(ha, lb, "center right")
leg.draw_frame(False)
plt.xlabel("Focus variable", size=15)
plt.ylabel("Fitted mean", size=15)
plt.title("Linear model prediction")
self.close_or_save(fig)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x2=1', lw=4, alpha=0.6, color='orange')
plt.fill_between(fvals1, ci1[:, 0], ci1[:, 1], color='grey')
plt.plot(fvals1, pr2, '-', label='x2=1', lw=4, alpha=0.6, color='lime')
plt.fill_between(fvals2, ci2[:, 0], ci2[:, 1], color='grey')
ha, lb = ax.get_legend_handles_labels()
plt.figlegend(ha, lb, "center right")
plt.xlabel("Focus variable", size=15)
plt.ylabel("Fitted mean", size=15)
plt.title("Linear model prediction")
self.close_or_save(fig)
| bsd-3-clause |
EPFL-LCN/neuronaldynamics-exercises | neurodynex3/hopfield_network/demo.py | 1 | 7372 |
# This file is part of the exercise code repository accompanying
# the book: Neuronal Dynamics (see http://neuronaldynamics.epfl.ch)
# located at http://github.com/EPFL-LCN/neuronaldynamics-exercises.
# This free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License 2.0 as published by the
# Free Software Foundation. You should have received a copy of the
# GNU General Public License along with the repository. If not,
# see http://www.gnu.org/licenses/.
# Should you reuse and publish the code for your own purposes,
# please cite the book or point to the webpage http://neuronaldynamics.epfl.ch.
# Wulfram Gerstner, Werner M. Kistler, Richard Naud, and Liam Paninski.
# Neuronal Dynamics: From Single Neurons to Networks and Models of Cognition.
# Cambridge University Press, 2014.
import neurodynex3.hopfield_network.plot_tools as hfplot
import neurodynex3.hopfield_network.pattern_tools as pattern_tools
import neurodynex3.hopfield_network.network as network
import matplotlib.pyplot as plt
import numpy as np
def run_hf_demo(pattern_size=4, nr_random_patterns=3, reference_pattern=0,
initially_flipped_pixels=3, nr_iterations=6, random_seed=None):
"""
Simple demo.
Args:
pattern_size:
nr_random_patterns:
reference_pattern:
initially_flipped_pixels:
nr_iterations:
random_seed:
Returns:
"""
    # instantiate a Hopfield network
hopfield_net = network.HopfieldNetwork(pattern_size**2)
# for the demo, use a seed to get a reproducible pattern
np.random.seed(random_seed)
# instantiate a pattern factory
factory = pattern_tools.PatternFactory(pattern_size, pattern_size)
# create a checkerboard pattern and add it to the pattern list
checkerboard = factory.create_checkerboard()
pattern_list = [checkerboard]
# add random patterns to the list
pattern_list.extend(factory.create_random_pattern_list(nr_random_patterns, on_probability=0.5))
hfplot.plot_pattern_list(pattern_list)
    # let the Hopfield network "learn" the patterns. Note: the patterns are not
    # stored explicitly; only the network weights are updated!
hopfield_net.store_patterns(pattern_list)
# how similar are the random patterns? Check the overlaps
overlap_matrix = pattern_tools.compute_overlap_matrix(pattern_list)
hfplot.plot_overlap_matrix(overlap_matrix)
# create a noisy version of a pattern and use that to initialize the network
noisy_init_state = pattern_tools.flip_n(pattern_list[reference_pattern], initially_flipped_pixels)
hopfield_net.set_state_from_pattern(noisy_init_state)
# uncomment the following line to enable a PROBABILISTIC network dynamic
# hopfield_net.set_dynamics_probabilistic_sync(2.5)
# uncomment the following line to enable an ASYNCHRONOUS network dynamic
# hopfield_net.set_dynamics_sign_async()
# run the network dynamics and record the network state at every time step
states = hopfield_net.run_with_monitoring(nr_iterations)
# each network state is a vector. reshape it to the same shape used to create the patterns.
states_as_patterns = factory.reshape_patterns(states)
# plot the states of the network
hfplot.plot_state_sequence_and_overlap(states_as_patterns, pattern_list, reference_pattern)
plt.show()
def run_hf_demo_alphabet(letters, initialization_noise_level=0.2, random_seed=None):
"""
Simple demo
Args:
letters:
initialization_noise_level:
random_seed:
Returns:
"""
# fixed size 10 for the alphabet.
pattern_size = 10
# pick some letters we want to store in the network
if letters is None:
letters = ['A', 'B', 'C', 'R', 'S', 'X', 'Y', 'Z']
reference_pattern = 0
    # instantiate a Hopfield network
hopfield_net = network.HopfieldNetwork(pattern_size**2)
# for the demo, use a seed to get a reproducible pattern
np.random.seed(random_seed)
# load the dictionary
abc_dict = pattern_tools.load_alphabet()
# for each key in letters, append the pattern to the list
pattern_list = [abc_dict[key] for key in letters]
hfplot.plot_pattern_list(pattern_list)
hopfield_net.store_patterns(pattern_list)
hopfield_net.set_state_from_pattern(
pattern_tools.get_noisy_copy(abc_dict[letters[reference_pattern]], initialization_noise_level))
states = hopfield_net.run_with_monitoring(6)
state_patterns = pattern_tools.reshape_patterns(states, pattern_list[0].shape)
hfplot.plot_state_sequence_and_overlap(state_patterns, pattern_list, reference_pattern)
plt.show()
def run_demo():
"""
Simple demo
"""
# Demo2: more neurons, more patterns, more noise
# run_hf_demo(pattern_size=6, nr_random_patterns=5, initially_flipped_pixels=11, nr_iterations=5)
# Demo3: more parameters
# run_hf_demo(pattern_size=4, nr_random_patterns=5,
# reference_pattern=0, initially_flipped_pixels=4, nr_iterations=6,
# random_seed=50)
print('recover letter A')
letter_list = ['A', 'B', 'C', 'S', 'X', 'Y', 'Z']
run_hf_demo_alphabet(letter_list, initialization_noise_level=0.2, random_seed=76)
print('letter A not recovered despite the overlap m=1 after one iteration')
letter_list.append('R')
run_hf_demo_alphabet(letter_list, initialization_noise_level=0.2, random_seed=76)
def run_user_function_demo():
    def upd_random(state_s0, weights):
        # illustrative dynamics: pick half of the neurons at random and set
        # each of them to a random value in {-1, +1}
        nr_neurons = len(state_s0)
        random_neuron_idx_list = np.random.permutation(nr_neurons)[:int(nr_neurons / 2)]
        state_s1 = state_s0.copy()
        for i in random_neuron_idx_list:
            state_s1[i] = -1 if (np.random.rand() < .5) else +1
        return state_s1
hopfield_net = network.HopfieldNetwork(6**2)
hopfield_net.set_dynamics_to_user_function(upd_random)
# for the demo, use a seed to get a reproducible pattern
# instantiate a pattern factory
factory = pattern_tools.PatternFactory(6, 6)
# create a checkerboard pattern and add it to the pattern list
checkerboard = factory.create_checkerboard()
pattern_list = [checkerboard]
# add random patterns to the list
pattern_list.extend(factory.create_random_pattern_list(4, on_probability=0.5))
hfplot.plot_pattern_list(pattern_list)
    # let the Hopfield network "learn" the patterns. Note: the patterns are not
    # stored explicitly; only the network weights are updated!
hopfield_net.store_patterns(pattern_list)
hopfield_net.set_state_from_pattern(pattern_list[0])
# uncomment the following line to enable a PROBABILISTIC network dynamic
# hopfield_net.set_dynamics_probabilistic_sync(2.5)
# uncomment the following line to enable an ASYNCHRONOUS network dynamic
# hopfield_net.set_dynamics_sign_async()
# run the network dynamics and record the network state at every time step
states = hopfield_net.run_with_monitoring(5)
# each network state is a vector. reshape it to the same shape used to create the patterns.
states_as_patterns = factory.reshape_patterns(states)
# plot the states of the network
hfplot.plot_state_sequence_and_overlap(states_as_patterns, pattern_list, 0)
plt.show()
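def run_user_function_demo_sign():
    """
    Minimal sketch (added for illustration, not called by run_demo): the same
    user-function hook can reproduce the deterministic synchronous sign
    dynamics. It only uses network/pattern_tools calls already used above.
    """
    def upd_sign_sync(state_s0, weights):
        # local field h_i = sum_j w_ij * s_j, then take its sign
        h = np.dot(weights, state_s0)
        return np.where(h >= 0, 1, -1)
    hopfield_net = network.HopfieldNetwork(4**2)
    hopfield_net.set_dynamics_to_user_function(upd_sign_sync)
    factory = pattern_tools.PatternFactory(4, 4)
    pattern_list = [factory.create_checkerboard()]
    hopfield_net.store_patterns(pattern_list)
    hopfield_net.set_state_from_pattern(pattern_tools.flip_n(pattern_list[0], 3))
    states = hopfield_net.run_with_monitoring(4)
    states_as_patterns = factory.reshape_patterns(states)
    hfplot.plot_state_sequence_and_overlap(states_as_patterns, pattern_list, 0)
    plt.show()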
if __name__ == '__main__':
run_demo()
# run_user_function_demo()
| gpl-2.0 |
BoltzmannBrain/nupic.research | projects/classification/sensor/sensortag_data/plot_sensortag_data.py | 3 | 2049 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv
import matplotlib.pyplot as plt
EXPERIMENTS = ['jump',
'run',
'sit',
'stumble',
'stairs-down',
'stairs-up',
'walk'
]
NUM_RECORDS_TO_PLOT = 80
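# Optional helper sketch (not used by the plotting loop below): the same
# parsing done with pandas. The 'x'/'y'/'z' column names and the skipped
# second row mirror the csv handling below; pandas itself is an assumption.
def loadAccelerometerFrame(filePath, numRecords=NUM_RECORDS_TO_PLOT):
  import pandas as pd
  df = pd.read_csv(filePath, skiprows=[1])  # skip the row after the header
  return df[['x', 'y', 'z']].astype(float).head(numRecords)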
plt.figure(figsize=(20, 10))
for exp in EXPERIMENTS:
filePath = "data/%s-5min.csv" % exp
with open(filePath, 'rU') as f:
reader = csv.reader(f)
headers = reader.next()
reader.next()
t = []
x = []
y = []
z = []
for i, values in enumerate(reader):
record = dict(zip(headers, values))
t.append(i)
x.append(record['x'])
y.append(record['y'])
z.append(record['z'])
if i > NUM_RECORDS_TO_PLOT:
break
subplot_index = EXPERIMENTS.index(exp)
plt.subplot(4, 2, subplot_index + 1)
plt.plot(t, x, 'r', t, y, 'b', t, z, 'g')
plt.tight_layout()
plt.title(exp)
plt.xlim([0, NUM_RECORDS_TO_PLOT])
plt.ylim([-8, 8])
plt.xlabel('timestep')
plt.ylabel('accelerometer')
plt.grid()
plt.show()
| agpl-3.0 |
abhisg/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
pratapvardhan/scikit-learn | sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
    """Generate a grid of points based on the ``percentiles`` of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
        and ``grid.shape[0]`` equals the product of the axis lengths
        (at most ``grid_resolution ** X.shape[1]``).
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
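def _grid_from_X_example():  # pragma: no cover
    # Illustrative only, never called by scikit-learn itself: demonstrates the
    # shape contract documented above on a small random matrix in which every
    # column has more unique values than ``grid_resolution``.
    rng = np.random.RandomState(0)
    X = rng.rand(20, 3)
    grid, axes = _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=4)
    # cartesian product of three 4-point axes -> 4 ** 3 = 64 grid points
    assert grid.shape == (4 ** X.shape[1], X.shape[1])
    assert len(axes) == X.shape[1]
    return grid, axes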
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
        # don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
        # if feature_names is not given, use feature indices as names
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
ClimbsRocks/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression essentially minimizes a penalised version
of the least-squares objective. The penalty `shrinks` the
values of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, compared to that
of standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
kmather73/ggplot | ggplot/scales/scale_y_continuous.py | 12 | 1202 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .scale import scale
from copy import deepcopy
from matplotlib.pyplot import FuncFormatter
dollar = lambda x, pos: '$%1.2f' % x
currency = dollar
comma = lambda x, pos: '{:0,d}'.format(int(x))
millions = lambda x, pos: '$%1.1fM' % (x*1e-6)
percent = lambda x, pos: '{0:.0f}%'.format(x*100)
LABEL_FORMATS = {
'comma': comma,
'dollar': dollar,
'currency': currency,
'millions': millions,
'percent': percent
}
class scale_y_continuous(scale):
VALID_SCALES = ['name', 'labels', 'limits', 'breaks', 'trans']
def __radd__(self, gg):
gg = deepcopy(gg)
if self.name:
gg.ylab = self.name.title()
if not (self.labels is None):
if self.labels in LABEL_FORMATS:
format_func = LABEL_FORMATS[self.labels]
gg.ytick_formatter = FuncFormatter(format_func)
else:
gg.ytick_labels = self.labels
if not (self.limits is None):
gg.ylimits = self.limits
if not (self.breaks is None):
gg.ybreaks = self.breaks
return gg
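# Minimal usage sketch (for illustration only; not part of ggplot's public
# API): the formatter lambdas above plug straight into matplotlib's
# FuncFormatter.
if __name__ == "__main__":  # pragma: no cover
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1e6, 2e6], [0.0, 0.5, 1.0])
    ax.xaxis.set_major_formatter(FuncFormatter(millions))  # '$0.0M', '$1.0M', ...
    ax.yaxis.set_major_formatter(FuncFormatter(percent))   # '0%', '50%', '100%'
    plt.show()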
| bsd-2-clause |
russel1237/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 71 | 25104 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a ValueError is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
# even if shape[1] agrees (although thus second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# paired_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
# Test that a value error is raised when the lengths of X and Y should not
# differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
# Test that the cosine is kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
# converted to 2D anyways
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
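def test_tuplify_roundtrip():
    # Illustrative check added for clarity (not in the original suite):
    # tuplify mirrors the array structure, so np.array inverts it exactly.
    X = np.arange(6).reshape(2, 3)
    assert_array_equal(np.array(tuplify(X)), X)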
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
paris-saclay-cds/ramp-workflow | rampwf/tests/test_kits.py | 1 | 6694 | import os
import glob
import shutil
from textwrap import dedent
import cloudpickle
import pytest
from rampwf.utils import import_module_from_source
from rampwf.utils.testing import (
assert_submission, assert_notebook, blend_submissions)
PATH = os.path.dirname(__file__)
def skip_no_tensorflow():
try:
import tensorflow # noqa
except ImportError:
return pytest.mark.skip(reason='tensorflow not available')
return pytest.mark.basic
def _generate_grid_path_kits():
grid = []
for path_kit in sorted(glob.glob(os.path.join(PATH, 'kits', '*'))):
if 'digits' in path_kit:
grid.append(pytest.param(os.path.abspath(path_kit),
marks=skip_no_tensorflow()))
elif 'data_label' in path_kit:
pass
else:
grid.append(os.path.abspath(path_kit))
return grid
def test_external_imports(tmpdir):
# checking imports from an external_imports folder located in the
# ramp_kit_dir
# temporary kit
path_kit = tmpdir.join("titanic_external_imports")
shutil.copytree(os.path.join(PATH, "kits", "titanic"), path_kit)
problem_path = os.path.join(path_kit, "problem.py")
submissions_dir = os.path.join(path_kit, 'submissions')
submission_path = os.path.join(submissions_dir, 'starting_kit')
estimator_path = os.path.join(submission_path, "estimator.py")
# module to be imported
ext_module_dir = path_kit.mkdir("external_imports").mkdir("utils")
with open(os.path.join(ext_module_dir, "test_imports.py"), 'w+') as f:
f.write(
dedent(
"""
x = 2
"""
)
)
for path in [problem_path, estimator_path]:
with open(path, 'a') as f:
f.write(
dedent(
"""
from utils import test_imports
assert test_imports.x == 2
"""
)
)
assert_submission(
ramp_kit_dir=path_kit,
ramp_data_dir=path_kit,
ramp_submission_dir=submissions_dir,
submission=submission_path,
is_pickle=True,
save_output=False,
retrain=True)
@pytest.mark.parametrize(
"path_kit",
_generate_grid_path_kits())
def test_notebook_testing(path_kit):
# check if there is a notebook to be tested
if len(glob.glob(os.path.join(path_kit, '*.ipynb'))):
assert_notebook(ramp_kit_dir=path_kit)
@pytest.mark.parametrize(
"path_kit",
_generate_grid_path_kits()
)
def test_submission(path_kit):
submissions = sorted(glob.glob(os.path.join(path_kit, 'submissions', '*')))
for sub in submissions:
# FIXME: to be removed once el-nino tests is fixed.
if 'el_nino' in sub:
pytest.xfail('el-nino is failing due to xarray.')
else:
assert_submission(
ramp_kit_dir=path_kit,
ramp_data_dir=path_kit,
ramp_submission_dir=os.path.join(path_kit, 'submissions'),
submission=os.path.basename(sub), is_pickle=True,
save_output=False, retrain=True)
def test_blending():
assert_submission(
ramp_kit_dir=os.path.join(PATH, "kits", "iris"),
ramp_data_dir=os.path.join(PATH, "kits", "iris"),
ramp_submission_dir=os.path.join(PATH, "kits", "iris", "submissions"),
submission='starting_kit', is_pickle=True,
save_output=True, retrain=True)
assert_submission(
ramp_kit_dir=os.path.join(PATH, "kits", "iris"),
ramp_data_dir=os.path.join(PATH, "kits", "iris"),
ramp_submission_dir=os.path.join(PATH, "kits", "iris", "submissions"),
submission='random_forest_10_10', is_pickle=True,
save_output=True, retrain=True)
blend_submissions(
['starting_kit', 'random_forest_10_10'],
ramp_kit_dir=os.path.join(PATH, "kits", "iris"),
ramp_data_dir=os.path.join(PATH, "kits", "iris"),
ramp_submission_dir=os.path.join(PATH, "kits", "iris", "submissions"),
save_output=True)
# cleaning up so next test doesn't try to train "training_output"
shutil.rmtree(os.path.join(
PATH, "kits", "iris", "submissions", "training_output"))
def test_data_label():
assert_submission(
ramp_kit_dir=os.path.join(PATH, "kits", "iris_data_label"),
ramp_data_dir=os.path.join(PATH, "kits", "iris_data_label"),
data_label='data_label',
ramp_submission_dir=os.path.join(
PATH, "kits", "iris_data_label", "submissions"),
submission='starting_kit', is_pickle=True,
save_output=True, retrain=True)
assert_submission(
ramp_kit_dir=os.path.join(PATH, "kits", "iris_data_label"),
ramp_data_dir=os.path.join(PATH, "kits", "iris_data_label"),
data_label='data_label',
ramp_submission_dir=os.path.join(
PATH, "kits", "iris_data_label", "submissions"),
submission='random_forest_10_10', is_pickle=True,
save_output=True, retrain=True)
blend_submissions(
['starting_kit', 'random_forest_10_10'],
ramp_kit_dir=os.path.join(PATH, "kits", "iris_data_label"),
ramp_data_dir=os.path.join(PATH, "kits", "iris_data_label"),
data_label='data_label',
ramp_submission_dir=os.path.join(
PATH, "kits", "iris_data_label", "submissions"),
save_output=True)
# cleaning up so next test doesn't try to train "training_output"
shutil.rmtree(os.path.join(
PATH, "kits", "iris_data_label", "submissions", "training_output"))
def test_cloudpickle():
"""Check cloudpickle works with the way modules are imported from source.
This only checks that an object that can be pickled with cloudpickle can
still be pickled with cloudpickle when imported dynamically using
import_module_from_source.
"""
# use iris_old as the object has to be a custom class not an object
# from a python package that is in sys.path such as a sklearn object
kit = "iris_old"
ramp_kit_dir = os.path.join(PATH, "kits", kit)
ramp_data_dir = os.path.join(PATH, "kits", kit)
ramp_submission = os.path.join(PATH, "kits", kit, "submissions",
"starting_kit")
problem_module = import_module_from_source(
os.path.join(ramp_kit_dir, 'problem.py'), 'problem')
workflow = problem_module.workflow
X_train, y_train = problem_module.get_train_data(path=ramp_data_dir)
model = workflow.train_submission(ramp_submission, X_train, y_train)
# test cloudpickle
cloudpickle.dumps(model)
| bsd-3-clause |
zooniverse/aggregation | experimental/serengeti/IAAI/alg4.py | 2 | 1239 | #!/usr/bin/env python
__author__ = 'greg'
#check to see what different tau values give us
from nodes import setup, speciesList
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats.stats import pearsonr
numUser = [5,10,15,20,25]
algPercent = []
tauRange = np.arange(1,103,25)
for tau in tauRange:
print tau
algPercent.append([])
for j in range(10):
photos,users = setup(tau=tau)
for p in photos.values():
p.__sample__(20)
for u in users.values():
u.__prune__()
#initialize things using majority voting
for p in photos.values():
p.__majorityVote__()
#estimate the user's "correctness"
for u in users.values():
for s in speciesList:
u.__speciesCorrect__(s)
for p in photos.values():
p.__weightedMajorityVote__()
correct = 0
total = 0.
for p in photos.values():
if p.__goldStandardCompare__():
correct += 1
total += 1
algPercent[-1].append(correct/total)
meanValues = [np.mean(p) for p in algPercent]
std = [np.std(p) for p in algPercent]
plt.errorbar(tauRange,meanValues,yerr=std)
plt.show()
| apache-2.0 |
commoncrawl/cc-crawl-statistics | plot/crawl_size.py | 1 | 12466 | import os
import pandas
import re
import sys
import types
from collections import defaultdict
from hyperloglog import HyperLogLog
from crawlplot import CrawlPlot, PLOTDIR
from crawlstats import CST, CrawlStatsJSONDecoder, HYPERLOGLOG_ERROR,\
MonthlyCrawl
class CrawlSizePlot(CrawlPlot):
def __init__(self):
self.size = defaultdict(dict)
self.size_by_type = defaultdict(dict)
self.type_index = defaultdict(dict)
self.crawls = {}
self.ncrawls = 0
self.hll = defaultdict(dict)
self.N = 0
self.sum_counts = False
def add(self, key, val):
cst = CST[key[0]]
if cst not in (CST.size, CST.size_estimate):
return
item_type = key[1]
crawl = key[2]
count = 0
if cst == CST.size_estimate:
item_type = ' '.join([item_type, 'estim.'])
hll = CrawlStatsJSONDecoder.json_decode_hyperloglog(val)
count = len(hll)
self.hll[item_type][crawl] = hll
elif cst == CST.size:
count = val
self.add_by_type(crawl, item_type, count)
def add_by_type(self, crawl, item_type, count):
if crawl not in self.crawls:
self.crawls[crawl] = self.ncrawls
self.size['crawl'][self.ncrawls] = crawl
date = pandas.Timestamp(MonthlyCrawl.date_of(crawl))
self.size['date'][self.ncrawls] = date
self.ncrawls += 1
else:
date = self.size['date'][self.crawls[crawl]]
if item_type in self.size and \
self.crawls[crawl] in self.size[item_type]:
# add count to existing record?
if self.sum_counts:
count += self.size[item_type][self.crawls[crawl]]
self.size[item_type][self.crawls[crawl]] = count
_N = self.type_index[item_type][self.crawls[crawl]]
self.size_by_type['size'][_N] = count
return
self.size[item_type][self.crawls[crawl]] = count
self.size_by_type['crawl'][self.N] = crawl
self.size_by_type['date'][self.N] = date
self.size_by_type['type'][self.N] = item_type
self.size_by_type['size'][self.N] = count
self.type_index[item_type][self.crawls[crawl]] = self.N
self.N += 1
def cumulative_size(self):
latest_n_crawls_cumul = [2, 3, 4, 6, 9, 12]
total_pages = 0
sorted_crawls = sorted(self.crawls)
for crawl in sorted_crawls:
total_pages += self.size['page'][self.crawls[crawl]]
self.add_by_type(crawl, 'page cumul.', total_pages)
urls_cumul = defaultdict(dict)
for item_type in self.hll.keys():
item_type_cumul = ' '.join([item_type, 'cumul.'])
item_type_new = ' '.join([item_type, 'new'])
cumul_hll = HyperLogLog(HYPERLOGLOG_ERROR)
n = 0
hlls = []
for crawl in sorted(self.hll[item_type]):
n += 1
hll = self.hll[item_type][crawl]
last_cumul_hll_len = len(cumul_hll)
cumul_hll.update(hll)
# cumulative size
self.add_by_type(crawl, item_type_cumul, len(cumul_hll))
# new unseen items this crawl (since the first analyzed crawl)
unseen = (len(cumul_hll) - last_cumul_hll_len)
if unseen > len(hll):
# 1% error rate for cumulative HLLs is large in comparison
# to crawl size, adjust to size of items in this crawl
# (there can be no more new items than the size of the crawl)
unseen = len(hll)
self.add_by_type(crawl, item_type_new, unseen)
hlls.append(hll)
# cumulative size for last N crawls
for n_crawls in latest_n_crawls_cumul:
item_type_n_crawls = '{} cumul. last {} crawls'.format(
item_type, n_crawls)
if n_crawls <= len(hlls):
cum_hll = HyperLogLog(HYPERLOGLOG_ERROR)
for i in range(1, (n_crawls+1)):
if i > len(hlls):
break
cum_hll.update(hlls[-i])
size_last_n = len(cum_hll)
if item_type == 'url estim.':
urls_cumul[crawl][str(n_crawls)] = size_last_n
else:
size_last_n = 'nan'
self.add_by_type(crawl, item_type_n_crawls, size_last_n)
for n, crawl in enumerate(sorted_crawls):
for n_crawls in latest_n_crawls_cumul:
if n_crawls > (n+1):
self.add_by_type(crawl,
'page cumul. last {} crawls'.format(n_crawls),
'nan')
continue
cumul_pages = 0
for c in sorted_crawls[(1+n-n_crawls):(n+1)]:
cumul_pages += self.size['page'][self.crawls[c]]
self.add_by_type(crawl,
'page cumul. last {} crawls'.format(n_crawls),
cumul_pages)
urls_cumul[crawl][str(n_crawls)] = urls_cumul[crawl][str(n_crawls)]/cumul_pages
for crawl in urls_cumul:
for n_crawls in urls_cumul[crawl]:
self.add_by_type(crawl,
'URLs/pages last {} crawls'.format(n_crawls),
urls_cumul[crawl][n_crawls])
def transform_data(self):
self.size = pandas.DataFrame(self.size)
self.size_by_type = pandas.DataFrame(self.size_by_type)
def save_data(self):
self.size.to_csv('data/crawlsize.csv')
self.size_by_type.to_csv('data/crawlsizebytype.csv')
def duplicate_ratio(self):
# -- duplicate ratio
data = self.size[['crawl', 'page', 'url', 'digest estim.']]
data['1-(urls/pages)'] = 100 * (1.0 - (data['url'] / data['page']))
data['1-(digests/pages)'] = \
100 * (1.0 - (data['digest estim.'] / data['page']))
floatf = '{0:.1f}%'.format
print(data.to_string(formatters={'1-(urls/pages)': floatf,
'1-(digests/pages)': floatf}),
file=open('data/crawlduplicates.txt', 'w'))
def plot(self):
# -- size per crawl (pages, URL and content digest)
row_types = ['page', 'url', # 'url estim.',
'digest estim.']
self.size_plot(self.size_by_type, row_types, '',
'Crawl Size', 'Pages / Unique Items',
'crawlsize/monthly.png',
data_export_csv='crawlsize/monthly.csv')
# -- cumulative size
row_types = ['page cumul.', 'url estim. cumul.',
'digest estim. cumul.']
self.size_plot(self.size_by_type, row_types, ' cumul\.$',
'Crawl Size Cumulative',
'Pages / Unique Items Cumulative',
'crawlsize/cumulative.png')
# -- new items per crawl
row_types = ['page', 'url estim. new',
'digest estim. new']
self.size_plot(self.size_by_type, row_types, ' new$',
'New Items per Crawl (not observed in prior crawls)',
'Pages / New Items', 'crawlsize/monthly_new.png',
data_export_csv='crawlsize/monthly_new.csv')
# -- cumulative URLs over last N crawls (this and preceding N-1 crawls)
row_types = ['url', '1 crawl', # 'url' replaced by '1 crawl'
'url estim. cumul. last 2 crawls',
'url estim. cumul. last 3 crawls',
'url estim. cumul. last 4 crawls',
'url estim. cumul. last 6 crawls',
'url estim. cumul. last 9 crawls',
'url estim. cumul. last 12 crawls']
data = self.size_by_type
data = data[data['type'].isin(row_types)]
data.replace(to_replace='url', value='1 crawl', inplace=True)
self.size_plot(data, row_types, '^url estim\. cumul\. last | crawls?$',
'URLs Cumulative Over Last N Crawls',
'Unique URLs cumulative',
'crawlsize/url_last_n_crawls.png',
clabel='n crawls',
data_export_csv='crawlsize/url_last_n_crawls.csv')
# -- cumul. digests over last N crawls (this and preceding N-1 crawls)
row_types = ['digest estim.', '1 crawl', # 'url' replaced by '1 crawl'
'digest estim. cumul. last 2 crawls',
'digest estim. cumul. last 3 crawls',
'digest estim. cumul. last 6 crawls',
'digest estim. cumul. last 12 crawls']
data = self.size_by_type
data = data[data['type'].isin(row_types)]
data.replace(to_replace='digest estim.', value='1 crawl', inplace=True)
self.size_plot(data, row_types,
'^digest estim\. cumul\. last | crawls?$',
'Content Digest Cumulative Over Last N Crawls',
'Unique content digests cumulative',
'crawlsize/digest_last_n_crawls.png',
clabel='n crawls')
# -- URLs, hosts, domains, tlds (normalized)
data = self.size_by_type
row_types = ['url', 'tld', 'domain', 'host']
data = data[data['type'].isin(row_types)]
self.export_csv(data, 'crawlsize/domain.csv')
size_norm = data['size'] / 1000.0
data['size'] = size_norm.where(data['type'] == 'tld',
other=data['size'])
data.replace(to_replace='tld', value='tld e+04', inplace=True)
size_norm = size_norm / 10000.0
data['size'] = size_norm.where(data['type'] == 'host',
other=data['size'])
data.replace(to_replace='host', value='host e+07', inplace=True)
data['size'] = size_norm.where(data['type'] == 'domain',
other=data['size'])
data.replace(to_replace='domain', value='domain e+07', inplace=True)
size_norm = size_norm / 100.0
data['size'] = size_norm.where(data.type == 'url',
other=data['size'])
data.replace(to_replace='url', value='url e+09', inplace=True)
self.size_plot(data, '', '',
'URLs / Hosts / Domains / TLDs per Crawl',
'Unique Items', 'crawlsize/domain.png')
def export_csv(self, data, csv):
if csv is not None:
data.reset_index().pivot(index='crawl',
columns='type', values='size').to_csv(
os.path.join(PLOTDIR, csv))
def size_plot(self, data, row_filter, type_name_norm,
title, ylabel, img_file, clabel='', data_export_csv=None):
if len(row_filter) > 0:
data = data[data['type'].isin(row_filter)]
if type_name_norm != '':
for value in row_filter:
replacement = value
if isinstance(type_name_norm, str):
if re.search(type_name_norm, value):
while re.search(type_name_norm, replacement):
replacement = re.sub(type_name_norm,
'', replacement)
elif isinstance(type_name_norm, types.FunctionType):
replacement = type_name_norm(value)
if replacement != value:
data.replace(to_replace=value, value=replacement,
inplace=True)
print(data)
self.export_csv(data, data_export_csv)
return self.line_plot(data, title, ylabel, img_file,
x='date', y='size', c='type', clabel=clabel)
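# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The cumulative counts produced in cumulative_size() come from merging
# per-crawl HyperLogLog sketches: the union of the sketches estimates the
# number of distinct items seen across crawls. Minimal demonstration using
# the same imports as above; the example URLs are placeholders.
def _hll_cumulative_demo():
    cumul = HyperLogLog(HYPERLOGLOG_ERROR)
    crawl_a = HyperLogLog(HYPERLOGLOG_ERROR)
    crawl_b = HyperLogLog(HYPERLOGLOG_ERROR)
    for url in ('http://example.com/a', 'http://example.com/b'):
        crawl_a.add(url)
    for url in ('http://example.com/b', 'http://example.com/c'):
        crawl_b.add(url)
    for monthly_sketch in (crawl_a, crawl_b):
        cumul.update(monthly_sketch)  # union of the two sketches
    return len(cumul)  # cardinality estimate, ~3 distinct URLs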
if __name__ == '__main__':
plot = CrawlSizePlot()
plot.read_data(sys.stdin)
plot.cumulative_size()
plot.transform_data()
plot.save_data()
plot.duplicate_ratio()
plot.plot()
| apache-2.0 |
zhongyuanzhou/FCH808.github.io | Intro to Machine Learning/ud120-projects/regression/finance_regression.py | 2 | 2200 | #!/usr/bin/python
"""
starter code for the regression mini-project
loads up/formats a modified version of the dataset
(why modified? we've removed some trouble points
that you'll find yourself in the outliers mini-project)
draws a little scatterplot of the training/testing data
you fill in the regression code where indicated
"""
import sys
import pickle
sys.path.append("../ud120-projects/tools/")
from feature_format import featureFormat, targetFeatureSplit
dictionary = pickle.load( open("../ud120-projects/final_project/final_project_dataset_modified.pkl", "r") )
### list the features you want to look at--first item in the
### list will be the "target" feature
features_list = ["bonus", "salary"]
data = featureFormat( dictionary, features_list, remove_any_zeroes=True)#, "long_term_incentive"], remove_any_zeroes=True )
target, features = targetFeatureSplit( data )
### training-testing split needed in regression, just like classification
from sklearn.cross_validation import train_test_split
feature_train, feature_test, target_train, target_test = train_test_split(features, target, test_size=0.5, random_state=42)
train_color = "b"
test_color = "r"
### your regression goes here!
### please name it reg, so that the plotting code below picks it up and
### plots it correctly
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(feature_train, target_train)
### draw the scatterplot, with color-coded training and testing points
import matplotlib.pyplot as plt
for feature, target in zip(feature_test, target_test):
plt.scatter( feature, target, color=test_color )
for feature, target in zip(feature_train, target_train):
plt.scatter( feature, target, color=train_color )
### labels for the legend
plt.scatter(feature_test[0], target_test[0], color=test_color, label="test")
plt.scatter(feature_train[0], target_train[0], color=train_color, label="train")
### draw the regression line, once it's coded
try:
plt.plot( feature_test, reg.predict(feature_test) )
except NameError:
print "Exception"
pass
plt.xlabel(features_list[1])
plt.ylabel(features_list[0])
plt.legend()
plt.show() | mit |
mementum/backtrader | backtrader/analyzers/pyfolio.py | 1 | 5933 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-2020 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import collections
import backtrader as bt
from backtrader.utils.py3 import items, iteritems
from . import TimeReturn, PositionsValue, Transactions, GrossLeverage
class PyFolio(bt.Analyzer):
'''This analyzer uses 4 children analyzers to collect data and transforms it
    into a data set compatible with ``pyfolio``
Children Analyzer
- ``TimeReturn``
Used to calculate the returns of the global portfolio value
- ``PositionsValue``
Used to calculate the value of the positions per data. It sets the
``headers`` and ``cash`` parameters to ``True``
- ``Transactions``
Used to record each transaction on a data (size, price, value). Sets
the ``headers`` parameter to ``True``
- ``GrossLeverage``
Keeps track of the gross leverage (how much the strategy is invested)
Params:
These are passed transparently to the children
- timeframe (default: ``bt.TimeFrame.Days``)
If ``None`` then the timeframe of the 1st data of the system will be
used
      - compression (default: ``1``)
If ``None`` then the compression of the 1st data of the system will be
used
Both ``timeframe`` and ``compression`` are set following the default
behavior of ``pyfolio`` which is working with *daily* data and upsample it
to obtaine values like yearly returns.
Methods:
- get_analysis
Returns a dictionary with returns as values and the datetime points for
each return as keys
'''
params = (
('timeframe', bt.TimeFrame.Days),
('compression', 1)
)
def __init__(self):
dtfcomp = dict(timeframe=self.p.timeframe,
compression=self.p.compression)
self._returns = TimeReturn(**dtfcomp)
self._positions = PositionsValue(headers=True, cash=True)
self._transactions = Transactions(headers=True)
self._gross_lev = GrossLeverage()
def stop(self):
super(PyFolio, self).stop()
self.rets['returns'] = self._returns.get_analysis()
self.rets['positions'] = self._positions.get_analysis()
self.rets['transactions'] = self._transactions.get_analysis()
self.rets['gross_lev'] = self._gross_lev.get_analysis()
def get_pf_items(self):
'''Returns a tuple of 4 elements which can be used for further processing with
``pyfolio``
returns, positions, transactions, gross_leverage
Because the objects are meant to be used as direct input to ``pyfolio``
this method makes a local import of ``pandas`` to convert the internal
*backtrader* results to *pandas DataFrames* which is the expected input
by, for example, ``pyfolio.create_full_tear_sheet``
The method will break if ``pandas`` is not installed
'''
# keep import local to avoid disturbing installations with no pandas
import pandas
from pandas import DataFrame as DF
#
# Returns
cols = ['index', 'return']
returns = DF.from_records(iteritems(self.rets['returns']),
index=cols[0], columns=cols)
returns.index = pandas.to_datetime(returns.index)
returns.index = returns.index.tz_localize('UTC')
rets = returns['return']
#
# Positions
pss = self.rets['positions']
ps = [[k] + v[-2:] for k, v in iteritems(pss)]
cols = ps.pop(0) # headers are in the first entry
positions = DF.from_records(ps, index=cols[0], columns=cols)
positions.index = pandas.to_datetime(positions.index)
positions.index = positions.index.tz_localize('UTC')
#
# Transactions
txss = self.rets['transactions']
txs = list()
        # The transactions have a common key (date) and can potentially happen
# for several assets. The dictionary has a single key and a list of
# lists. Each sublist contains the fields of a transaction
# Hence the double loop to undo the list indirection
for k, v in iteritems(txss):
for v2 in v:
txs.append([k] + v2)
cols = txs.pop(0) # headers are in the first entry
transactions = DF.from_records(txs, index=cols[0], columns=cols)
transactions.index = pandas.to_datetime(transactions.index)
transactions.index = transactions.index.tz_localize('UTC')
# Gross Leverage
cols = ['index', 'gross_lev']
gross_lev = DF.from_records(iteritems(self.rets['gross_lev']),
index=cols[0], columns=cols)
gross_lev.index = pandas.to_datetime(gross_lev.index)
gross_lev.index = gross_lev.index.tz_localize('UTC')
glev = gross_lev['gross_lev']
# Return all together
return rets, positions, transactions, glev
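# --- Illustrative usage sketch (editor's addition, not part of backtrader) ---
# Hedged example of how this analyzer is typically attached to a Cerebro run;
# the data feed / strategy setup is assumed to exist elsewhere and is only
# hinted at here.
def _example_pyfolio_usage():  # pragma: no cover - illustration only
    cerebro = bt.Cerebro()
    # ... add a data feed and a strategy to `cerebro` here ...
    cerebro.addanalyzer(bt.analyzers.PyFolio, _name='pyfolio')
    strat = cerebro.run()[0]
    pyfoliozer = strat.analyzers.getbyname('pyfolio')
    rets, positions, transactions, gross_lev = pyfoliozer.get_pf_items()
    # these four objects can be handed to pyfolio, e.g. create_full_tear_sheet
    return rets, positions, transactions, gross_lev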
| gpl-3.0 |
mrgloom/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_weights_var_impGBM.py | 3 | 6129 | import sys
sys.path.insert(1, "../../../")
import h2o
import random
def weights_var_imp(ip,port):
def check_same(data1, data2, min_rows_scale):
gbm1_regression = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["economy"],
min_rows=5,
ntrees=5,
max_depth=2)
gbm2_regression = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year"]],
y=data2["economy"],
training_frame=data2,
min_rows=5*min_rows_scale,
weights_column="weights",
ntrees=5,
max_depth=2)
gbm1_binomial = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["economy_20mpg"],
min_rows=5,
distribution="bernoulli",
ntrees=5,
max_depth=2)
gbm2_binomial = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year"]],
y=data2["economy_20mpg"],
training_frame=data2,
weights_column="weights",
min_rows=5*min_rows_scale,
distribution="bernoulli",
ntrees=5,
max_depth=2)
gbm1_multinomial = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["cylinders"],
min_rows=5,
distribution="multinomial",
ntrees=5,
max_depth=2)
gbm2_multinomial = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year"]],
y=data2["cylinders"],
training_frame=data2,
weights_column="weights",
min_rows=5*min_rows_scale,
distribution="multinomial",
ntrees=5,
max_depth=2)
reg1_vi = gbm1_regression.varimp(return_list=True)
reg2_vi = gbm2_regression.varimp(return_list=True)
bin1_vi = gbm1_binomial.varimp(return_list=True)
bin2_vi = gbm2_binomial.varimp(return_list=True)
mul1_vi = gbm1_multinomial.varimp(return_list=True)
mul2_vi = gbm2_multinomial.varimp(return_list=True)
        print "Varimp (regression) no weights vs. weights: {0}, {1}".format(reg1_vi, reg2_vi)
print "Varimp (binomial) no weights vs. weights: {0}, {1}".format(bin1_vi, bin2_vi)
print "Varimp (multinomial) no weights vs. weights: {0}, {1}".format(mul1_vi, mul2_vi)
        for rvi1, rvi2 in zip(reg1_vi, reg2_vi): assert rvi1 == rvi2, "Expected vi's (regression) to be the same, but got {0}, and {1}".format(rvi1, rvi2)
        for bvi1, bvi2 in zip(bin1_vi, bin2_vi): assert bvi1 == bvi2, "Expected vi's (binomial) to be the same, but got {0}, and {1}".format(bvi1, bvi2)
        for mvi1, mvi2 in zip(mul1_vi, mul2_vi): assert mvi1 == mvi2, "Expected vi's (multinomial) to be the same, but got {0}, and {1}".format(mvi1, mvi2)
h2o_cars_data = h2o.import_file(h2o.locate("smalldata/junit/cars_20mpg.csv"))
h2o_cars_data["economy_20mpg"] = h2o_cars_data["economy_20mpg"].asfactor()
h2o_cars_data["cylinders"] = h2o_cars_data["cylinders"].asfactor()
# uniform weights same as no weights
weight = random.randint(1,10)
uniform_weights = [[weight] for r in range(406)]
h2o_uniform_weights = h2o.H2OFrame(python_obj=uniform_weights)
h2o_uniform_weights.setNames(["weights"])
h2o_data_uniform_weights = h2o_cars_data.cbind(h2o_uniform_weights)
print "\n\nChecking that using uniform weights is equivalent to no weights:"
check_same(h2o_cars_data, h2o_data_uniform_weights, weight)
# zero weights same as removed observations
zero_weights = [[0] if random.randint(0,1) else [1] for r in range(406)]
h2o_zero_weights = h2o.H2OFrame(python_obj=zero_weights)
h2o_zero_weights.setNames(["weights"])
h2o_data_zero_weights = h2o_cars_data.cbind(h2o_zero_weights)
h2o_data_zeros_removed = h2o_cars_data[h2o_zero_weights["weights"] == 1]
print "\n\nChecking that using some zero weights is equivalent to removing those observations:"
check_same(h2o_data_zeros_removed, h2o_data_zero_weights, 1)
# doubled weights same as doubled observations
doubled_weights = [[1] if random.randint(0,1) else [2] for r in range(406)]
h2o_doubled_weights = h2o.H2OFrame(python_obj=doubled_weights)
h2o_doubled_weights.setNames(["weights"])
h2o_data_doubled_weights = h2o_cars_data.cbind(h2o_doubled_weights)
doubled_data = h2o.as_list(h2o_cars_data, use_pandas=False)
colnames = doubled_data.pop(0)
for idx, w in enumerate(doubled_weights):
if w[0] == 2: doubled_data.append(doubled_data[idx])
h2o_data_doubled = h2o.H2OFrame(python_obj=doubled_data)
h2o_data_doubled.setNames(colnames)
h2o_data_doubled["economy_20mpg"] = h2o_data_doubled["economy_20mpg"].asfactor()
h2o_data_doubled["cylinders"] = h2o_data_doubled["cylinders"].asfactor()
h2o_data_doubled_weights["economy_20mpg"] = h2o_data_doubled_weights["economy_20mpg"].asfactor()
h2o_data_doubled_weights["cylinders"] = h2o_data_doubled_weights["cylinders"].asfactor()
print "\n\nChecking that doubling some weights is equivalent to doubling those observations:"
check_same(h2o_data_doubled, h2o_data_doubled_weights, 1)
if __name__ == "__main__":
h2o.run_test(sys.argv, weights_var_imp)
| apache-2.0 |
aabadie/scikit-learn | sklearn/metrics/cluster/__init__.py | 91 | 1468 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import fowlkes_mallows_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .unsupervised import calinski_harabaz_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"fowlkes_mallows_score", "entropy", "silhouette_samples",
"silhouette_score", "calinski_harabaz_score", "consensus_score"]
| bsd-3-clause |
ozburo/youtube-dl | youtube_dl/extractor/wsj.py | 30 | 4694 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
float_or_none,
unified_strdate,
)
class WSJIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
https?://video-api\.wsj\.com/api-video/player/iframe\.html\?.*?\bguid=|
https?://(?:www\.)?(?:wsj|barrons)\.com/video/(?:[^/]+/)+|
wsj:
)
(?P<id>[a-fA-F0-9-]{36})
'''
IE_DESC = 'Wall Street Journal'
_TESTS = [{
'url': 'http://video-api.wsj.com/api-video/player/iframe.html?guid=1BD01A4C-BFE8-40A5-A42F-8A8AF9898B1A',
'md5': 'e230a5bb249075e40793b655a54a02e4',
'info_dict': {
'id': '1BD01A4C-BFE8-40A5-A42F-8A8AF9898B1A',
'ext': 'mp4',
'upload_date': '20150202',
'uploader_id': 'jdesai',
'creator': 'jdesai',
'categories': list, # a long list
'duration': 90,
'title': 'Bills Coach Rex Ryan Updates His Old Jets Tattoo',
},
}, {
'url': 'http://www.wsj.com/video/can-alphabet-build-a-smarter-city/359DDAA8-9AC1-489C-82E6-0429C1E430E0.html',
'only_matching': True,
}, {
'url': 'http://www.barrons.com/video/capitalism-deserves-more-respect-from-millennials/F301217E-6F46-43AE-B8D2-B7180D642EE9.html',
'only_matching': True,
}, {
'url': 'https://www.wsj.com/video/series/a-brief-history-of/the-modern-cell-carrier-how-we-got-here/980E2187-401D-48A1-B82B-1486CEE06CB9',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
info = self._download_json(
'http://video-api.wsj.com/api-video/find_all_videos.asp', video_id,
query={
'type': 'guid',
'count': 1,
'query': video_id,
'fields': ','.join((
'type', 'hls', 'videoMP4List', 'thumbnailList', 'author',
'description', 'name', 'duration', 'videoURL', 'titletag',
'formattedCreationDate', 'keywords', 'editor')),
})['items'][0]
title = info.get('name', info.get('titletag'))
formats = []
f4m_url = info.get('videoURL')
if f4m_url:
formats.extend(self._extract_f4m_formats(
f4m_url, video_id, f4m_id='hds', fatal=False))
m3u8_url = info.get('hls')
if m3u8_url:
formats.extend(self._extract_m3u8_formats(
info['hls'], video_id, ext='mp4',
entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
for v in info.get('videoMP4List', []):
mp4_url = v.get('url')
if not mp4_url:
continue
tbr = int_or_none(v.get('bitrate'))
formats.append({
'url': mp4_url,
'format_id': 'http' + ('-%d' % tbr if tbr else ''),
'tbr': tbr,
'width': int_or_none(v.get('width')),
'height': int_or_none(v.get('height')),
'fps': float_or_none(v.get('fps')),
})
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
# Thumbnails are conveniently in the correct format already
'thumbnails': info.get('thumbnailList'),
'creator': info.get('author'),
'uploader_id': info.get('editor'),
'duration': int_or_none(info.get('duration')),
'upload_date': unified_strdate(info.get(
'formattedCreationDate'), day_first=False),
'title': title,
'categories': info.get('keywords'),
}
class WSJArticleIE(InfoExtractor):
_VALID_URL = r'(?i)https?://(?:www\.)?wsj\.com/articles/(?P<id>[^/?#&]+)'
_TEST = {
'url': 'https://www.wsj.com/articles/dont-like-china-no-pandas-for-you-1490366939?',
'info_dict': {
'id': '4B13FA62-1D8C-45DB-8EA1-4105CB20B362',
'ext': 'mp4',
'upload_date': '20170221',
'uploader_id': 'ralcaraz',
'title': 'Bao Bao the Panda Leaves for China',
}
}
def _real_extract(self, url):
article_id = self._match_id(url)
webpage = self._download_webpage(url, article_id)
video_id = self._search_regex(
r'data-src=["\']([a-fA-F0-9-]{36})', webpage, 'video id')
return self.url_result('wsj:%s' % video_id, WSJIE.ie_key(), video_id)
| unlicense |
michaelneuder/image_quality_analysis | bin/nets/old/SSIM_double_feed_normalization.py | 1 | 7641 | #!/usr/bin/env python3
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import numpy as np
np.set_printoptions(threshold=np.nan)
import tensorflow as tf
import time
import pandas as pd
def convolve_inner_layers(x, W, b):
y = tf.nn.conv2d(x, W, strides = [1,1,1,1], padding='SAME')
y = tf.nn.bias_add(y, b)
return tf.nn.tanh(y)
def convolve_ouput_layer(x, W, b):
y = tf.nn.conv2d(x, W, strides = [1,1,1,1], padding='SAME')
y = tf.nn.bias_add(y, b)
return y
def conv_net(x, W, b):
conv1 = convolve_inner_layers(x, W['weights1'], b['bias1'])
conv2 = convolve_inner_layers(conv1, W['weights2'], b['bias2'])
conv3 = convolve_inner_layers(conv2, W['weights3'], b['bias3'])
output_feed = tf.concat([conv1, conv2, conv3],3)
output = convolve_ouput_layer(output_feed, W['weights_out'], b['bias_out'])
return output
def get_variance(training_target):
all_pixels = training_target.flatten()
return all_pixels.var()
def get_epoch(x, y, n):
input_size = x.shape[0]
number_batches = input_size // n
extra_examples = input_size % n
batches = {}
batch_indices = np.arange(input_size)
np.random.shuffle(batch_indices)
for i in range(number_batches):
temp_indices = batch_indices[n*i:n*(i+1)]
temp_x = []
temp_y = []
for j in temp_indices:
temp_x.append(x[j])
temp_y.append(y[j])
batches[i] = [np.asarray(temp_x), np.asarray(temp_y)]
if extra_examples != 0:
extra_indices = batch_indices[input_size-extra_examples:input_size]
temp_x = []
temp_y = []
for k in extra_indices:
temp_x.append(x[k])
temp_y.append(y[k])
batches[i+1] = [np.asarray(temp_x), np.asarray(temp_y)]
return batches
def normalize_input(train_data, test_data):
mean, std_dev = np.mean(train_data, axis=0), np.std(train_data, axis=0)
return (train_data - mean) / std_dev, (test_data - mean) / std_dev
def main():
# parameters
filter_dim = 11
filter_dim2 = 11
batch_size = 4
image_dim = 96
input_layer = 2
first_layer = 50
second_layer = 25
third_layer = 10
output_layer = 1
learning_rate = .01
epochs = 10000
    # seeding for debug purposes --- don't forget to remove
# SEED = 12345
# np.random.seed(SEED)
# tf.set_random_seed(SEED)
print('loading image files ... ')
# train/test images
orig_500 = pd.read_csv('https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/orig_500.txt', header=None, delim_whitespace = True)
recon_500 = pd.read_csv('https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/recon_500.txt', header=None, delim_whitespace = True)
SSIM_500 = pd.read_csv('https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/SSIM_500.txt', header=None, delim_whitespace = True)
orig_140 = pd.read_csv('https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/orig_140.txt', header=None, delim_whitespace = True)
recon_140 = pd.read_csv('https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/recon_140.txt', header=None, delim_whitespace = True)
SSIM_140 = pd.read_csv('https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/SSIM_140.txt', header=None, delim_whitespace = True)
    # normalization
original_images_train = orig_500.values
reconstructed_images_train = recon_500.values
original_images_test = orig_140.values
reconstructed_images_test = recon_140.values
training_input = np.dstack((original_images_train, reconstructed_images_train))
testing_input = np.dstack((original_images_test, reconstructed_images_test))
training_input_normalized, testing_input_normalized = normalize_input(training_input, testing_input)
comparison_images_train = SSIM_500.values
comparison_images_test = SSIM_140.values
# get size of training and testing set
train_size = original_images_train.shape[0]
test_size = original_images_test.shape[0]
# reshaping the result data to --- (num pics), 96, 96, 1
target_data_train = np.reshape(comparison_images_train, [train_size, image_dim, image_dim, 1])
target_data_test = np.reshape(comparison_images_test, [test_size, image_dim, image_dim, 1])
# reshaping
train_data = np.reshape(training_input_normalized, [train_size,image_dim,image_dim,2])
test_data = np.reshape(testing_input_normalized, [test_size,image_dim,image_dim,2])
# initializing variables --- fan in
scaling_factor = 1.0
initializer = tf.contrib.layers.variance_scaling_initializer(factor=scaling_factor, mode='FAN_IN')
weights = {
'weights1': tf.get_variable('weights1', [filter_dim,filter_dim,input_layer,first_layer], initializer=initializer),
'weights2': tf.get_variable('weights2', [filter_dim2,filter_dim2,first_layer,second_layer], initializer=initializer),
'weights3': tf.get_variable('weights3', [filter_dim2,filter_dim2,second_layer,third_layer], initializer=initializer),
'weights_out': tf.get_variable('weights4', [filter_dim2,filter_dim2,third_layer+second_layer+first_layer,output_layer], initializer=initializer)
}
biases = {
'bias1': tf.get_variable('bias1', [first_layer], initializer=initializer),
'bias2': tf.get_variable('bias2', [second_layer], initializer=initializer),
'bias3': tf.get_variable('bias3', [third_layer], initializer=initializer),
'bias_out': tf.get_variable('bias4', [output_layer], initializer=initializer)
}
# tf Graph input
x = tf.placeholder(tf.float32, [None, image_dim, image_dim, 2])
y = tf.placeholder(tf.float32, [None, image_dim, image_dim, 1])
# model
prediction = conv_net(x, weights, biases)
# get variance to normalize error terms during training
variance = get_variance(target_data_train)
# loss and optimization
cost = tf.reduce_mean(tf.square(tf.subtract(prediction, y)))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# session
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
epoch_count = 0
global_step = 0
start_time = time.time()
print("starting training ... ")
while epoch_count < epochs:
print('---------------------------------------------------------')
print('beginning epoch {} ...'.format(epoch_count))
epoch = get_epoch(train_data, target_data_train, batch_size)
for i in epoch:
x_data_train, y_data_train = np.asarray(epoch[i][0]), np.asarray(epoch[i][1])
sess.run(optimizer, feed_dict={x : x_data_train, y : y_data_train})
loss = sess.run(cost, feed_dict={x : x_data_train, y : y_data_train})
percent_error = 100*loss/variance
print(" - training global_step {0:4d} error: {1:8.4f} {2:8.2f}%".format(global_step, loss, percent_error))
global_step += 1
epoch_count+=1
print('optimization finished!')
print('\nstarting testing...')
score = sess.run(cost, feed_dict={x: test_data, y: target_data_test})
percent_error = 100*score/variance
pred = sess.run(prediction, feed_dict={x: test_data})
print('---- test score : {:.4f}, {:.4f}% ----'.format(score, percent_error))
if __name__ == '__main__':
main()
| mit |
TUW-GEO/rt1 | tests/test_rtfit_dumps.py | 1 | 6189 | """
Test the fits-module by loading a dumped rtfits result and performing
all actions again
"""
import unittest
import numpy as np
import cloudpickle
import matplotlib.pyplot as plt
import copy
import os
class TestDUMPS(unittest.TestCase):
def setUp(self):
self.sig0_dB_path = os.path.dirname(__file__) + os.sep + "sig0_dB.dump"
self.sig0_linear_path = os.path.dirname(__file__) + os.sep + "sig0_linear.dump"
def load_data(self, path):
with open(path, 'rb') as file:
fit = cloudpickle.load(file)
return fit
# self.assertTrue(
# err < errdict[key],
# msg='derived error' + str(err) + 'too high for ' + str(key))
def test_rtplots(self):
for path, msg in zip([self.sig0_dB_path, self.sig0_linear_path],
['dB', 'linear']):
print(f'testing plotfunctions for {msg} fit')
fit = self.load_data(path)
# call performfit to re-initialize _fnevals functions
# and evaluate intermediate results
            # (they might have been removed if symengine has been used)
fit.lsq_kwargs['verbose'] = 0
fit.performfit(intermediate_results=True,
print_progress=True)
# get list of available plot-methods
method_list = [func for func in dir(fit.plot) if
callable(getattr(fit.plot, func)) and not func.startswith("__")]
for function_name in method_list:
print(f'... {function_name}')
if function_name == 'printsig0analysis':
# check 'dataset' index slider
f, s1, s2 = fit.plot.__getattribute__(function_name)(
range2=2, range1=1, use_index='dataset')
# check update functions
s1.set_val(1)
s2.set_val(1)
plt.close(f)
# check 'groups' index slider
f, s1, s2 = fit.plot.__getattribute__(function_name)(
range2=2, range1=1, use_index='groups')
# check update functions
s1.set_val(1)
s2.set_val(1)
plt.close(f)
elif function_name == 'analyzemodel':
f, sliders, txt_but = fit.plot.__getattribute__(
function_name)()
# check update functions
for key, s in sliders.items():
s.set_val((s.valmax - s.valmin)/2.)
for key, b in txt_but.items():
if key == 'buttons':
# the initial status is ALL OFF
stat = b.get_status()
for i in range(len(stat)):
b.set_active(i)
# now all should be ON
self.assertTrue(np.all(b.get_status()))
for i in range(len(stat)):
b.set_active(i)
# now all should be OFF again
self.assertTrue(~np.all(b.get_status()))
else:
# set the boundaries of the parameters
if 'min' in key:
b.set_val(0.02)
if 'max' in key:
b.set_val(0.99)
plt.close(f)
elif function_name == 'intermediate_residuals':
# check default (e.g. pandas datetime-offset)
f = fit.plot.__getattribute__(function_name)(fmt='%d.%b %Y')
plt.close(f)
# check grouping with respect to incidence angles and
# convert the labels to degrees
f = fit.plot.__getattribute__(function_name)(
grp=('inc', 10),
label_formatter=lambda x,y:round(np.rad2deg(x),2))
plt.close(f)
# check grouping with respect to datetimes
f = fit.plot.__getattribute__(function_name)(grp='groups')
plt.close(f)
# check grouping with respect to the dataset index
f = fit.plot.__getattribute__(function_name)(
grp='dataset', plottype='2D', fmt='%Y %b %d (%H:%M)')
plt.close(f)
else:
f = fit.plot.__getattribute__(function_name)()
plt.close(f)
def test_performfit(self):
for path, msg in zip([self.sig0_dB_path, self.sig0_linear_path],
['dB', 'linear']):
print(f'testing plotfunctions for {msg} fit')
fit = self.load_data(path)
old_results = fit.res_dict
# print model definition
fit.model_definition
print('testing performfit')
fit.lsq_kwargs['verbose'] = 0
fit.performfit(intermediate_results=True,
print_progress=True)
# call _cache_info() to make coveralls happy
fit._cache_info()
fit.R._cache_info()
# try to dump the file again (without fit-details)
fit.dump(os.path.join(os.path.dirname(__file__), 'testdump1.dump'),
mini=True)
# try to dump the file again (with fit-details)
fit.dump(os.path.join(os.path.dirname(__file__), 'testdump2.dump'),
mini=False)
for key, val in old_results.items():
self.assertTrue(np.allclose(fit.res_dict[key],
old_results[key], atol=1e-3, rtol=1e-3),
msg=f'fitted values for {msg} fit of {key} ' +
f'differ by {np.subtract(fit.res_dict[key], old_results[key]).mean()}')
if __name__ == "__main__":
unittest.main() | apache-2.0 |
ajaybhat/email-categorization | ml/classifier.py | 1 | 1774 | from pprint import pprint
import random
from nltk import NaiveBayesClassifier
from nltk.classify.util import accuracy
from sklearn.cross_validation import KFold
from training_set_util import load_training_set
from ml_util import extract_bigrams
categories = {1: 'Development',
2: 'Personal/Professional communication',
3: 'Technical communication',
4: 'HR communication',
5: 'Autogenerated emails/Others'}
def bag_of_words(words):
return dict([(word, True) for word in words])
def create_training_dict(text, category):
tokens = extract_bigrams(text)
return [(bag_of_words(tokens), category)]
def classify(text, sender=None, subject=None):
training_set = load_training_set()
classifier = NaiveBayesClassifier.train(training_set)
test_data = bag_of_words(extract_bigrams(text))
if sender is not None:
test_data[sender] = True
if subject is not None:
test_data[subject] = True
classified = classifier.prob_classify(test_data)
pprint({categories[sample]: classified.prob(sample) for sample in classified.samples()})
return categories[classified.max()]
def cross_validate():
training_set = load_training_set()
random.shuffle(training_set)
average = 0
cv = KFold(len(training_set), n_folds=10, indices=True, shuffle=False, random_state=None)
for traincv, evalcv in cv:
classifier = NaiveBayesClassifier.train(training_set[traincv[0]:traincv[len(traincv) - 1]])
acc = accuracy(classifier, training_set[evalcv[0]:evalcv[len(evalcv) - 1]])
print 'Range: ', evalcv[0], 'to', evalcv[len(evalcv) - 1]
print 'Accuracy: %4.2f' % acc
average += acc
print 'Average accuracy: %4.2f' % (average / 10)
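# --- Illustrative usage (editor's addition, not part of the original module) ---
# Hedged sketch: the sample text, sender and subject below are made up; they
# only show how classify() is meant to be called once a pickled training set
# is available via load_training_set().
if __name__ == '__main__':
    sample_text = "Please review the attached patch and rerun the unit tests."
    print classify(sample_text,
                   sender='[email protected]',
                   subject='Code review request')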
| gpl-2.0 |
pprett/scikit-learn | examples/plot_kernel_approximation.py | 26 | 8069 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular note that
a datapoint (represented as a dot) is not necessarily classified
into the region it is lying in, since it will not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, feature) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = (data[:n_samples // 2],
digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
data_test, targets_test = (data[n_samples // 2:],
digits.target[n_samples // 2:])
# data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second y axis for timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/scipy/signal/_arraytools.py | 28 | 7553 | """
Functions for acting on a axis of an array.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
def axis_slice(a, start=None, stop=None, step=None, axis=-1):
"""Take a slice along axis 'axis' from 'a'.
Parameters
----------
a : numpy.ndarray
The array to be sliced.
start, stop, step : int or None
The slice parameters.
axis : int, optional
The axis of `a` to be sliced.
Examples
--------
    >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> axis_slice(a, start=0, stop=1, axis=1)
array([[1],
[4],
[7]])
>>> axis_slice(a, start=1, axis=0)
array([[4, 5, 6],
[7, 8, 9]])
Notes
-----
The keyword arguments start, stop and step are used by calling
slice(start, stop, step). This implies axis_slice() does not
    handle its arguments exactly the same as indexing. To select
a single index k, for example, use
axis_slice(a, start=k, stop=k+1)
In this case, the length of the axis 'axis' in the result will
be 1; the trivial dimension is not removed. (Use numpy.squeeze()
to remove trivial axes.)
"""
a_slice = [slice(None)] * a.ndim
a_slice[axis] = slice(start, stop, step)
b = a[a_slice]
return b
def axis_reverse(a, axis=-1):
"""Reverse the 1-d slices of `a` along axis `axis`.
Returns axis_slice(a, step=-1, axis=axis).
"""
return axis_slice(a, step=-1, axis=axis)
def odd_ext(x, n, axis=-1):
"""
Odd extension at the boundaries of an array
Generate a new ndarray by making an odd extension of `x` along an axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
>>> from scipy.signal._arraytools import odd_ext
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> odd_ext(a, 2)
array([[-1, 0, 1, 2, 3, 4, 5, 6, 7],
[-4, -1, 0, 1, 4, 9, 16, 23, 28]])
Odd extension is a "180 degree rotation" at the endpoints of the original
array:
>>> t = np.linspace(0, 1.5, 100)
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
>>> b = odd_ext(a, 40)
>>> import matplotlib.pyplot as plt
    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='odd extension')
    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 1:
return x
if n > x.shape[axis] - 1:
raise ValueError(("The extension length n (%d) is too big. " +
"It must not exceed x.shape[axis]-1, which is %d.")
% (n, x.shape[axis] - 1))
left_end = axis_slice(x, start=0, stop=1, axis=axis)
left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
right_end = axis_slice(x, start=-1, axis=axis)
right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
ext = np.concatenate((2 * left_end - left_ext,
x,
2 * right_end - right_ext),
axis=axis)
return ext
def even_ext(x, n, axis=-1):
"""
Even extension at the boundaries of an array
Generate a new ndarray by making an even extension of `x` along an axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
>>> from scipy.signal._arraytools import even_ext
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> even_ext(a, 2)
array([[ 3, 2, 1, 2, 3, 4, 5, 4, 3],
[ 4, 1, 0, 1, 4, 9, 16, 9, 4]])
Even extension is a "mirror image" at the boundaries of the original array:
>>> t = np.linspace(0, 1.5, 100)
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
>>> b = even_ext(a, 40)
>>> import matplotlib.pyplot as plt
    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='even extension')
    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 1:
return x
if n > x.shape[axis] - 1:
raise ValueError(("The extension length n (%d) is too big. " +
"It must not exceed x.shape[axis]-1, which is %d.")
% (n, x.shape[axis] - 1))
left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
ext = np.concatenate((left_ext,
x,
right_ext),
axis=axis)
return ext
def const_ext(x, n, axis=-1):
"""
Constant extension at the boundaries of an array
Generate a new ndarray that is a constant extension of `x` along an axis.
The extension repeats the values at the first and last element of
the axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
>>> from scipy.signal._arraytools import const_ext
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> const_ext(a, 2)
array([[ 1, 1, 1, 2, 3, 4, 5, 5, 5],
[ 0, 0, 0, 1, 4, 9, 16, 16, 16]])
Constant extension continues with the same values as the endpoints of the
array:
>>> t = np.linspace(0, 1.5, 100)
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
>>> b = const_ext(a, 40)
>>> import matplotlib.pyplot as plt
    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='constant extension')
    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 1:
return x
left_end = axis_slice(x, start=0, stop=1, axis=axis)
ones_shape = [1] * x.ndim
ones_shape[axis] = n
ones = np.ones(ones_shape, dtype=x.dtype)
left_ext = ones * left_end
right_end = axis_slice(x, start=-1, axis=axis)
right_ext = ones * right_end
ext = np.concatenate((left_ext,
x,
right_ext),
axis=axis)
return ext
def zero_ext(x, n, axis=-1):
"""
Zero padding at the boundaries of an array
Generate a new ndarray that is a zero padded extension of `x` along
an axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the
axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
>>> from scipy.signal._arraytools import zero_ext
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> zero_ext(a, 2)
array([[ 0, 0, 1, 2, 3, 4, 5, 0, 0],
[ 0, 0, 0, 1, 4, 9, 16, 0, 0]])
"""
if n < 1:
return x
zeros_shape = list(x.shape)
zeros_shape[axis] = n
zeros = np.zeros(zeros_shape, dtype=x.dtype)
ext = np.concatenate((zeros, x, zeros), axis=axis)
return ext
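# Illustrative comparison of the extension helpers above (an added sketch, not
# part of the original module); it only runs when this file is executed directly.
if __name__ == "__main__":
    a = np.array([1, 2, 3, 4, 5])
    print(odd_ext(a, 2))    # [-1  0  1  2  3  4  5  6  7]
    print(even_ext(a, 2))   # [ 3  2  1  2  3  4  5  4  3]
    print(const_ext(a, 2))  # [ 1  1  1  2  3  4  5  5  5]
    print(zero_ext(a, 2))   # [ 0  0  1  2  3  4  5  0  0]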
| mit |
fspaolo/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 8 | 7108 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
# this test was not actually passing before!
raise SkipTest
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dico1 = MiniBatchDictionaryLearning(n_components, n_iter=10, batch_size=1,
shuffle=False, dict_init=V,
random_state=0).fit(X)
dico2 = MiniBatchDictionaryLearning(n_components, n_iter=1, dict_init=V,
random_state=0)
for ii, sample in enumerate(X):
dico2.partial_fit(sample, iter_offset=ii * dico2.n_iter)
# if ii == 1: break
assert_true(not np.all(sparse_encode(X, dico1.components_, alpha=100) ==
0))
assert_array_equal(dico1.components_, dico2.components_)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
paulromano/openmc | tests/regression_tests/mgxs_library_no_nuclides/test.py | 6 | 2709 | import hashlib
import openmc
import openmc.mgxs
from openmc.examples import pwr_pin_cell
from tests.testing_harness import PyAPITestHarness
class MGXSTestHarness(PyAPITestHarness):
def __init__(self, *args, **kwargs):
# Generate inputs using parent class routine
super().__init__(*args, **kwargs)
# Initialize a two-group structure
energy_groups = openmc.mgxs.EnergyGroups(group_edges=[0, 0.625, 20.e6])
# Initialize MGXS Library for a few cross section types
self.mgxs_lib = openmc.mgxs.Library(self._model.geometry)
self.mgxs_lib.by_nuclide = False
# Test relevant MGXS types
relevant_MGXS_TYPES = [item for item in openmc.mgxs.MGXS_TYPES
if item != 'current']
# Add in a subset of openmc.mgxs.ARBITRARY_VECTOR_TYPES and
# openmc.mgxs.ARBITRARY_MATRIX_TYPES so we can see the code works,
# but not use too much resources
relevant_MGXS_TYPES += [
"(n,elastic)", "(n,level)", "(n,2n)", "(n,na)", "(n,nc)",
"(n,gamma)", "(n,a)", "(n,Xa)", "heating", "damage-energy",
"(n,n1)", "(n,a0)", "(n,nc) matrix", "(n,n1) matrix",
"(n,2n) matrix"]
self.mgxs_lib.mgxs_types = tuple(relevant_MGXS_TYPES) + \
openmc.mgxs.MDGXS_TYPES
self.mgxs_lib.energy_groups = energy_groups
self.mgxs_lib.num_delayed_groups = 6
self.mgxs_lib.legendre_order = 3
self.mgxs_lib.domain_type = 'material'
self.mgxs_lib.build_library()
# Add tallies
self.mgxs_lib.add_to_tallies_file(self._model.tallies, merge=False)
def _get_results(self, hash_output=False):
"""Digest info in the statepoint and return as a string."""
# Read the statepoint file.
sp = openmc.StatePoint(self._sp_name)
# Load the MGXS library from the statepoint
self.mgxs_lib.load_from_statepoint(sp)
# Build a string from Pandas Dataframe for each MGXS
outstr = ''
for domain in self.mgxs_lib.domains:
for mgxs_type in self.mgxs_lib.mgxs_types:
mgxs = self.mgxs_lib.get_mgxs(domain, mgxs_type)
df = mgxs.get_pandas_dataframe()
outstr += mgxs_type + '\n' + df.to_string() + '\n'
# Hash the results if necessary
if hash_output:
sha512 = hashlib.sha512()
sha512.update(outstr.encode('utf-8'))
outstr = sha512.hexdigest()
return outstr
def test_mgxs_library_no_nuclides():
model = pwr_pin_cell()
harness = MGXSTestHarness('statepoint.10.h5', model)
harness.main()
| mit |
Jimmy-Morzaria/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
| bsd-3-clause |
Petr-Kovalev/nupic-win32 | external/linux32/lib/python2.6/site-packages/matplotlib/__init__.py | 69 | 28184 | """
This is an object-oriented plotting library.
A procedural interface is provided by the companion pylab module,
which may be imported directly, e.g::
from pylab import *
or using ipython::
ipython -pylab
For the most part, direct use of the object-oriented library is
encouraged when programming rather than working interactively. The
exceptions are the pylab commands :func:`~matplotlib.pyplot.figure`,
:func:`~matplotlib.pyplot.subplot`,
:func:`~matplotlib.backends.backend_qt4agg.show`, and
:func:`~pyplot.savefig`, which can greatly simplify scripting.
Modules include:
:mod:`matplotlib.axes`
defines the :class:`~matplotlib.axes.Axes` class. Most pylab
commands are wrappers for :class:`~matplotlib.axes.Axes`
methods. The axes module is the highest level of OO access to
the library.
:mod:`matplotlib.figure`
defines the :class:`~matplotlib.figure.Figure` class.
:mod:`matplotlib.artist`
defines the :class:`~matplotlib.artist.Artist` base class for
all classes that draw things.
:mod:`matplotlib.lines`
defines the :class:`~matplotlib.lines.Line2D` class for
drawing lines and markers
:mod:`matplotlib.patches`
defines classes for drawing polygons
:mod:`matplotlib.text`
defines the :class:`~matplotlib.text.Text`,
:class:`~matplotlib.text.TextWithDash`, and
:class:`~matplotlib.text.Annotate` classes
:mod:`matplotlib.image`
defines the :class:`~matplotlib.image.AxesImage` and
:class:`~matplotlib.image.FigureImage` classes
:mod:`matplotlib.collections`
classes for efficient drawing of groups of lines or polygons
:mod:`matplotlib.colors`
classes for interpreting color specifications and for making
colormaps
:mod:`matplotlib.cm`
colormaps and the :class:`~matplotlib.image.ScalarMappable`
mixin class for providing color mapping functionality to other
classes
:mod:`matplotlib.ticker`
classes for calculating tick mark locations and for formatting
tick labels
:mod:`matplotlib.backends`
a subpackage with modules for various gui libraries and output
formats
The base matplotlib namespace includes:
:data:`~matplotlib.rcParams`
a global dictionary of default configuration settings. It is
    initialized by code which may be overridden by a matplotlibrc
file.
:func:`~matplotlib.rc`
a function for setting groups of rcParams values
:func:`~matplotlib.use`
a function for setting the matplotlib backend. If used, this
function must be called immediately after importing matplotlib
for the first time. In particular, it must be called
**before** importing pylab (if pylab is imported).
matplotlib is written by John D. Hunter (jdh2358 at gmail.com) and a
host of others.
"""
from __future__ import generators
__version__ = '0.98.5.2'
__revision__ = '$Revision: 6660 $'
__date__ = '$Date: 2008-12-18 06:10:51 -0600 (Thu, 18 Dec 2008) $'
import os, re, shutil, subprocess, sys, warnings
import distutils.sysconfig
import distutils.version
NEWCONFIG = False
# Needed for toolkit setuptools support
if 0:
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
pass # must not have setuptools
if not hasattr(sys, 'argv'): # for modpython
sys.argv = ['modpython']
"""
Manage user customizations through a rc file.
The default file location is given in the following order
- environment variable MATPLOTLIBRC
- HOME/.matplotlib/matplotlibrc if HOME is defined
- PATH/matplotlibrc where PATH is the return value of
get_data_path()
"""
import sys, os, tempfile
from rcsetup import defaultParams, validate_backend, validate_toolbar
from rcsetup import validate_cairo_format
major, minor1, minor2, s, tmp = sys.version_info
_python24 = major>=2 and minor1>=4
# the havedate check was a legacy from old matplotlib which preceded
# datetime support
_havedate = True
#try:
# import pkg_resources # pkg_resources is part of setuptools
#except ImportError: _have_pkg_resources = False
#else: _have_pkg_resources = True
if not _python24:
raise ImportError('matplotlib requires Python 2.4 or later')
import numpy
nn = numpy.__version__.split('.')
if not (int(nn[0]) >= 1 and int(nn[1]) >= 1):
raise ImportError(
'numpy 1.1 or later is required; you have %s' % numpy.__version__)
def is_string_like(obj):
if hasattr(obj, 'shape'): return 0
try: obj + ''
except (TypeError, ValueError): return 0
return 1
def _is_writable_dir(p):
"""
p is a string pointing to a putative writable dir -- return True p
is such a string, else False
"""
try: p + '' # test is string like
except TypeError: return False
try:
t = tempfile.TemporaryFile(dir=p)
t.write('1')
t.close()
except OSError: return False
else: return True
class Verbose:
"""
A class to handle reporting. Set the fileo attribute to any file
instance to handle the output. Default is sys.stdout
"""
levels = ('silent', 'helpful', 'debug', 'debug-annoying')
vald = dict( [(level, i) for i,level in enumerate(levels)])
# parse the verbosity from the command line; flags look like
# --verbose-silent or --verbose-helpful
_commandLineVerbose = None
for arg in sys.argv[1:]:
if not arg.startswith('--verbose-'): continue
_commandLineVerbose = arg[10:]
def __init__(self):
self.set_level('silent')
self.fileo = sys.stdout
def set_level(self, level):
'set the verbosity to one of the Verbose.levels strings'
if self._commandLineVerbose is not None:
level = self._commandLineVerbose
if level not in self.levels:
raise ValueError('Illegal verbose string "%s". Legal values are %s'%(level, self.levels))
self.level = level
def set_fileo(self, fname):
std = {
'sys.stdout': sys.stdout,
'sys.stderr': sys.stderr,
}
if fname in std:
self.fileo = std[fname]
else:
try:
fileo = file(fname, 'w')
except IOError:
raise ValueError('Verbose object could not open log file "%s" for writing.\nCheck your matplotlibrc verbose.fileo setting'%fname)
else:
self.fileo = fileo
def report(self, s, level='helpful'):
"""
print message s to self.fileo if self.level>=level. Return
value indicates whether a message was issued
"""
if self.ge(level):
print >>self.fileo, s
return True
return False
def wrap(self, fmt, func, level='helpful', always=True):
"""
        return a callable function that wraps func and reports its
output through the verbose handler if current verbosity level
is higher than level
if always is True, the report will occur on every function
call; otherwise only on the first time the function is called
"""
assert callable(func)
def wrapper(*args, **kwargs):
ret = func(*args, **kwargs)
if (always or not wrapper._spoke):
spoke = self.report(fmt%ret, level)
if not wrapper._spoke: wrapper._spoke = spoke
return ret
wrapper._spoke = False
wrapper.__doc__ = func.__doc__
return wrapper
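    # Usage sketch (illustrative): wrap a helper so its return value is reported
    # once at the 'helpful' level, as done for get_home/get_configdir below:
    #     get_home = verbose.wrap('$HOME=%s', _get_home, always=False)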
def ge(self, level):
'return true if self.level is >= level'
return self.vald[self.level]>=self.vald[level]
verbose=Verbose()
def checkdep_dvipng():
try:
s = subprocess.Popen(['dvipng','-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
line = s.stdout.readlines()[1]
v = line.split()[-1]
return v
except (IndexError, ValueError, OSError):
return None
def checkdep_ghostscript():
try:
if sys.platform == 'win32':
command_args = ['gswin32c', '--version']
else:
command_args = ['gs', '--version']
s = subprocess.Popen(command_args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
v = s.stdout.read()[:-1]
return v
except (IndexError, ValueError, OSError):
return None
def checkdep_tex():
try:
s = subprocess.Popen(['tex','-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
line = s.stdout.readlines()[0]
        pattern = r'3\.1\d+'
match = re.search(pattern, line)
v = match.group(0)
return v
except (IndexError, ValueError, AttributeError, OSError):
return None
def checkdep_pdftops():
try:
s = subprocess.Popen(['pdftops','-v'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for line in s.stderr:
if 'version' in line:
v = line.split()[-1]
return v
except (IndexError, ValueError, UnboundLocalError, OSError):
return None
def compare_versions(a, b):
"return True if a is greater than or equal to b"
if a:
a = distutils.version.LooseVersion(a)
b = distutils.version.LooseVersion(b)
if a>=b: return True
else: return False
else: return False
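# For example (illustrative): compare_versions('7.07', '7.0') -> True,
# compare_versions('6.0', '7.0') -> False, and compare_versions(None, '7.0')
# -> False, so a missing tool (a checkdep_* helper returning None) simply
# fails the version check.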
def checkdep_ps_distiller(s):
if not s:
return False
flag = True
gs_req = '7.07'
gs_sugg = '7.07'
gs_v = checkdep_ghostscript()
if compare_versions(gs_v, gs_sugg): pass
elif compare_versions(gs_v, gs_req):
verbose.report(('ghostscript-%s found. ghostscript-%s or later '
'is recommended to use the ps.usedistiller option.') % (gs_v, gs_sugg))
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller option can not be used '
'unless ghostscript-%s or later is installed on your system') % gs_req)
if s == 'xpdf':
pdftops_req = '3.0'
pdftops_req_alt = '0.9' # poppler version numbers, ugh
pdftops_v = checkdep_pdftops()
if compare_versions(pdftops_v, pdftops_req):
pass
elif compare_versions(pdftops_v, pdftops_req_alt) and not \
compare_versions(pdftops_v, '1.0'):
pass
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller can not be set to '
'xpdf unless xpdf-%s or later is installed on your system') % pdftops_req)
if flag:
return s
else:
return False
def checkdep_usetex(s):
if not s:
return False
tex_req = '3.1415'
gs_req = '7.07'
gs_sugg = '7.07'
dvipng_req = '1.5'
flag = True
tex_v = checkdep_tex()
if compare_versions(tex_v, tex_req): pass
else:
flag = False
warnings.warn(('matplotlibrc text.usetex option can not be used '
'unless TeX-%s or later is '
'installed on your system') % tex_req)
dvipng_v = checkdep_dvipng()
if compare_versions(dvipng_v, dvipng_req): pass
else:
flag = False
warnings.warn( 'matplotlibrc text.usetex can not be used with *Agg '
'backend unless dvipng-1.5 or later is '
'installed on your system')
gs_v = checkdep_ghostscript()
if compare_versions(gs_v, gs_sugg): pass
elif compare_versions(gs_v, gs_req):
verbose.report(('ghostscript-%s found. ghostscript-%s or later is '
'recommended for use with the text.usetex '
'option.') % (gs_v, gs_sugg))
else:
flag = False
warnings.warn(('matplotlibrc text.usetex can not be used '
'unless ghostscript-%s or later is '
'installed on your system') % gs_req)
return flag
def _get_home():
"""Find user's home directory if possible.
Otherwise raise error.
:see: http://mail.python.org/pipermail/python-list/2005-February/263921.html
"""
path=''
try:
path=os.path.expanduser("~")
except:
pass
if not os.path.isdir(path):
for evar in ('HOME', 'USERPROFILE', 'TMP'):
try:
path = os.environ[evar]
if os.path.isdir(path):
break
except: pass
if path:
return path
else:
raise RuntimeError('please define environment variable $HOME')
get_home = verbose.wrap('$HOME=%s', _get_home, always=False)
def _get_configdir():
"""
Return the string representing the configuration dir.
default is HOME/.matplotlib. you can override this with the
MPLCONFIGDIR environment variable
"""
configdir = os.environ.get('MPLCONFIGDIR')
if configdir is not None:
if not _is_writable_dir(configdir):
raise RuntimeError('Could not write to MPLCONFIGDIR="%s"'%configdir)
return configdir
h = get_home()
p = os.path.join(get_home(), '.matplotlib')
if os.path.exists(p):
if not _is_writable_dir(p):
raise RuntimeError("'%s' is not a writable dir; you must set %s/.matplotlib to be a writable dir. You can also set environment variable MPLCONFIGDIR to any writable directory where you want matplotlib data stored "% (h, h))
else:
if not _is_writable_dir(h):
raise RuntimeError("Failed to create %s/.matplotlib; consider setting MPLCONFIGDIR to a writable directory for matplotlib configuration data"%h)
os.mkdir(p)
return p
get_configdir = verbose.wrap('CONFIGDIR=%s', _get_configdir, always=False)
def _get_data_path():
'get the path to matplotlib data'
if 'MATPLOTLIBDATA' in os.environ:
path = os.environ['MATPLOTLIBDATA']
if not os.path.isdir(path):
raise RuntimeError('Path in environment MATPLOTLIBDATA not a directory')
return path
path = os.sep.join([os.path.dirname(__file__), 'mpl-data'])
if os.path.isdir(path): return path
# setuptools' namespace_packages may highjack this init file
# so need to try something known to be in matplotlib, not basemap
import matplotlib.afm
path = os.sep.join([os.path.dirname(matplotlib.afm.__file__), 'mpl-data'])
if os.path.isdir(path): return path
# py2exe zips pure python, so still need special check
if getattr(sys,'frozen',None):
path = os.path.join(os.path.split(sys.path[0])[0], 'mpl-data')
if os.path.isdir(path): return path
else:
# Try again assuming we need to step up one more directory
path = os.path.join(os.path.split(os.path.split(sys.path[0])[0])[0],
'mpl-data')
if os.path.isdir(path): return path
else:
# Try again assuming sys.path[0] is a dir not a exe
path = os.path.join(sys.path[0], 'mpl-data')
if os.path.isdir(path): return path
raise RuntimeError('Could not find the matplotlib data files')
def _get_data_path_cached():
if defaultParams['datapath'][0] is None:
defaultParams['datapath'][0] = _get_data_path()
return defaultParams['datapath'][0]
get_data_path = verbose.wrap('matplotlib data path %s', _get_data_path_cached,
always=False)
def get_example_data(fname):
"""
return a filehandle to one of the example files in mpl-data/example
*fname*
the name of one of the files in mpl-data/example
"""
datadir = os.path.join(get_data_path(), 'example')
fullpath = os.path.join(datadir, fname)
if not os.path.exists(fullpath):
raise IOError('could not find matplotlib example file "%s" in data directory "%s"'%(
fname, datadir))
return file(fullpath, 'rb')
def get_py2exe_datafiles():
datapath = get_data_path()
head, tail = os.path.split(datapath)
d = {}
for root, dirs, files in os.walk(datapath):
# Need to explicitly remove cocoa_agg files or py2exe complains
        # NOTE I don't know why, but do as previous version
if 'Matplotlib.nib' in files:
files.remove('Matplotlib.nib')
files = [os.path.join(root, filename) for filename in files]
root = root.replace(tail, 'mpl-data')
root = root[root.index('mpl-data'):]
d[root] = files
return d.items()
def matplotlib_fname():
"""
Return the path to the rc file
Search order:
* current working dir
* environ var MATPLOTLIBRC
* HOME/.matplotlib/matplotlibrc
* MATPLOTLIBDATA/matplotlibrc
"""
oldname = os.path.join( os.getcwd(), '.matplotlibrc')
if os.path.exists(oldname):
print >> sys.stderr, """\
WARNING: Old rc filename ".matplotlibrc" found in working dir
    and renamed to new default rc file name "matplotlibrc"
    (no leading "dot"). """
shutil.move('.matplotlibrc', 'matplotlibrc')
home = get_home()
oldname = os.path.join( home, '.matplotlibrc')
if os.path.exists(oldname):
configdir = get_configdir()
newname = os.path.join(configdir, 'matplotlibrc')
print >> sys.stderr, """\
WARNING: Old rc filename "%s" found and renamed to
new default rc file name "%s"."""%(oldname, newname)
shutil.move(oldname, newname)
fname = os.path.join( os.getcwd(), 'matplotlibrc')
if os.path.exists(fname): return fname
if 'MATPLOTLIBRC' in os.environ:
path = os.environ['MATPLOTLIBRC']
if os.path.exists(path):
fname = os.path.join(path, 'matplotlibrc')
if os.path.exists(fname):
return fname
fname = os.path.join(get_configdir(), 'matplotlibrc')
if os.path.exists(fname): return fname
path = get_data_path() # guaranteed to exist or raise
fname = os.path.join(path, 'matplotlibrc')
if not os.path.exists(fname):
warnings.warn('Could not find matplotlibrc; using defaults')
return fname
_deprecated_map = {
'text.fontstyle': 'font.style',
'text.fontangle': 'font.style',
'text.fontvariant': 'font.variant',
'text.fontweight': 'font.weight',
'text.fontsize': 'font.size',
'tick.size' : 'tick.major.size',
}
class RcParams(dict):
"""
A dictionary object including validation
validating functions are defined and associated with rc parameters in
:mod:`matplotlib.rcsetup`
"""
validate = dict([ (key, converter) for key, (default, converter) in \
defaultParams.iteritems() ])
def __setitem__(self, key, val):
try:
if key in _deprecated_map.keys():
alt = _deprecated_map[key]
warnings.warn('%s is deprecated in matplotlibrc. Use %s \
instead.'% (key, alt))
key = alt
cval = self.validate[key](val)
dict.__setitem__(self, key, cval)
except KeyError:
raise KeyError('%s is not a valid rc parameter.\
See rcParams.keys() for a list of valid parameters.'%key)
def rc_params(fail_on_error=False):
'Return the default params updated from the values in the rc file'
fname = matplotlib_fname()
if not os.path.exists(fname):
# this should never happen, default in mpl-data should always be found
message = 'could not find rc file; returning defaults'
ret = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
warnings.warn(message)
return ret
cnt = 0
rc_temp = {}
for line in file(fname):
cnt += 1
strippedline = line.split('#',1)[0].strip()
if not strippedline: continue
tup = strippedline.split(':',1)
if len(tup) !=2:
warnings.warn('Illegal line #%d\n\t%s\n\tin file "%s"'%\
(cnt, line, fname))
continue
key, val = tup
key = key.strip()
val = val.strip()
if key in rc_temp:
warnings.warn('Duplicate key in file "%s", line #%d'%(fname,cnt))
rc_temp[key] = (val, line, cnt)
ret = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
for key in ('verbose.level', 'verbose.fileo'):
if key in rc_temp:
val, line, cnt = rc_temp.pop(key)
if fail_on_error:
ret[key] = val # try to convert to proper type or raise
else:
try: ret[key] = val # try to convert to proper type or skip
except Exception, msg:
warnings.warn('Bad val "%s" on line #%d\n\t"%s"\n\tin file \
"%s"\n\t%s' % (val, cnt, line, fname, msg))
verbose.set_level(ret['verbose.level'])
verbose.set_fileo(ret['verbose.fileo'])
for key, (val, line, cnt) in rc_temp.iteritems():
if key in defaultParams:
if fail_on_error:
ret[key] = val # try to convert to proper type or raise
else:
try: ret[key] = val # try to convert to proper type or skip
except Exception, msg:
warnings.warn('Bad val "%s" on line #%d\n\t"%s"\n\tin file \
"%s"\n\t%s' % (val, cnt, line, fname, msg))
else:
print >> sys.stderr, """
Bad key "%s" on line %d in
%s.
You probably need to get an updated matplotlibrc file from
http://matplotlib.sf.net/_static/matplotlibrc or from the matplotlib source
distribution""" % (key, cnt, fname)
if ret['datapath'] is None:
ret['datapath'] = get_data_path()
if not ret['text.latex.preamble'] == ['']:
verbose.report("""
*****************************************************************
You have the following UNSUPPORTED LaTeX preamble customizations:
%s
Please do not ask for support with these customizations active.
*****************************************************************
"""% '\n'.join(ret['text.latex.preamble']), 'helpful')
verbose.report('loaded rc file %s'%fname)
return ret
# this is the instance used by the matplotlib classes
rcParams = rc_params()
rcParamsDefault = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
rcParams['ps.usedistiller'] = checkdep_ps_distiller(rcParams['ps.usedistiller'])
rcParams['text.usetex'] = checkdep_usetex(rcParams['text.usetex'])
def rc(group, **kwargs):
"""
Set the current rc params. Group is the grouping for the rc, eg.
for ``lines.linewidth`` the group is ``lines``, for
``axes.facecolor``, the group is ``axes``, and so on. Group may
also be a list or tuple of group names, eg. (*xtick*, *ytick*).
*kwargs* is a dictionary attribute name/value pairs, eg::
rc('lines', linewidth=2, color='r')
sets the current rc params and is equivalent to::
rcParams['lines.linewidth'] = 2
rcParams['lines.color'] = 'r'
The following aliases are available to save typing for interactive
users:
===== =================
Alias Property
===== =================
'lw' 'linewidth'
'ls' 'linestyle'
'c' 'color'
'fc' 'facecolor'
'ec' 'edgecolor'
'mew' 'markeredgewidth'
'aa' 'antialiased'
===== =================
Thus you could abbreviate the above rc command as::
rc('lines', lw=2, c='r')
Note you can use python's kwargs dictionary facility to store
dictionaries of default parameters. Eg, you can customize the
font rc as follows::
font = {'family' : 'monospace',
'weight' : 'bold',
'size' : 'larger'}
rc('font', **font) # pass in the font dict as kwargs
This enables you to easily switch between several configurations.
Use :func:`~matplotlib.pyplot.rcdefaults` to restore the default
rc params after changes.
"""
aliases = {
'lw' : 'linewidth',
'ls' : 'linestyle',
'c' : 'color',
'fc' : 'facecolor',
'ec' : 'edgecolor',
'mew' : 'markeredgewidth',
'aa' : 'antialiased',
}
if is_string_like(group):
group = (group,)
for g in group:
for k,v in kwargs.items():
name = aliases.get(k) or k
key = '%s.%s' % (g, name)
if key not in rcParams:
raise KeyError('Unrecognized key "%s" for group "%s" and name "%s"' %
(key, g, name))
rcParams[key] = v
def rcdefaults():
"""
Restore the default rc params - the ones that were created at
matplotlib load time.
"""
rcParams.update(rcParamsDefault)
if NEWCONFIG:
#print "importing from reorganized config system!"
try:
from config import rcParams, rcdefaults, mplConfig, save_config
verbose.set_level(rcParams['verbose.level'])
verbose.set_fileo(rcParams['verbose.fileo'])
except:
from config import rcParams, rcdefaults
_use_error_msg = """ This call to matplotlib.use() has no effect
because the backend has already been chosen;
matplotlib.use() must be called *before* pylab, matplotlib.pyplot,
or matplotlib.backends is imported for the first time.
"""
def use(arg, warn=True):
"""
Set the matplotlib backend to one of the known backends.
The argument is case-insensitive. For the Cairo backend,
the argument can have an extension to indicate the type of
output. Example:
use('cairo.pdf')
will specify a default of pdf output generated by Cairo.
Note: this function must be called *before* importing pylab for
the first time; or, if you are not using pylab, it must be called
before importing matplotlib.backends. If warn is True, a warning
    is issued if you try and call this after pylab or pyplot have been
loaded. In certain black magic use cases, eg
pyplot.switch_backends, we are doing the reloading necessary to
make the backend switch work (in some cases, eg pure image
    backends) so one can set warn=False to suppress the warnings
"""
if 'matplotlib.backends' in sys.modules:
if warn: warnings.warn(_use_error_msg)
return
arg = arg.lower()
if arg.startswith('module://'):
name = arg
else:
be_parts = arg.split('.')
name = validate_backend(be_parts[0])
rcParams['backend'] = name
if name == 'cairo' and len(be_parts) > 1:
rcParams['cairo.format'] = validate_cairo_format(be_parts[1])
def get_backend():
"Returns the current backend"
return rcParams['backend']
def interactive(b):
"""
Set interactive mode to boolean b.
If b is True, then draw after every plotting command, eg, after xlabel
"""
rcParams['interactive'] = b
def is_interactive():
'Return true if plot mode is interactive'
b = rcParams['interactive']
return b
def tk_window_focus():
"""Return true if focus maintenance under TkAgg on win32 is on.
This currently works only for python.exe and IPython.exe.
Both IDLE and Pythonwin.exe fail badly when tk_window_focus is on."""
if rcParams['backend'] != 'TkAgg':
return False
return rcParams['tk.window_focus']
# Now allow command line to override
# Allow command line access to the backend with -d (matlab compatible
# flag)
for s in sys.argv[1:]:
if s.startswith('-d') and len(s) > 2: # look for a -d flag
try:
use(s[2:])
except (KeyError, ValueError):
pass
# we don't want to assume all -d flags are backends, eg -debug
verbose.report('matplotlib version %s'%__version__)
verbose.report('verbose.level %s'%verbose.level)
verbose.report('interactive is %s'%rcParams['interactive'])
verbose.report('units is %s'%rcParams['units'])
verbose.report('platform is %s'%sys.platform)
verbose.report('loaded modules: %s'%sys.modules.keys(), 'debug')
| gpl-3.0 |
bdh1011/wau | venv/lib/python2.7/site-packages/pandas/tests/test_msgpack/test_seq.py | 6 | 1439 | #!/usr/bin/env python
# coding: utf-8
from pandas import compat
from pandas.compat import u
import pandas.msgpack as msgpack
binarydata = [chr(i) for i in range(256)]
binarydata = "".join(binarydata)
if compat.PY3:
binarydata = binarydata.encode('utf-8')
def gen_binary_data(idx):
data = binarydata[:idx % 300]
return data
def test_exceeding_unpacker_read_size():
dumpf = compat.BytesIO()
packer = msgpack.Packer()
NUMBER_OF_STRINGS = 6
read_size = 16
# 5 ok for read_size=16, while 6 glibc detected *** python: double free or corruption (fasttop):
# 20 ok for read_size=256, while 25 segfaults / glibc detected *** python: double free or corruption (!prev)
# 40 ok for read_size=1024, while 50 introduces errors
# 7000 ok for read_size=1024*1024, while 8000 leads to glibc detected *** python: double free or corruption (!prev):
for idx in range(NUMBER_OF_STRINGS):
data = gen_binary_data(idx)
dumpf.write(packer.pack(data))
f = compat.BytesIO(dumpf.getvalue())
dumpf.close()
unpacker = msgpack.Unpacker(f, read_size=read_size, use_list=1)
read_count = 0
for idx, o in enumerate(unpacker):
assert type(o) == bytes
assert o == gen_binary_data(idx)
read_count += 1
assert read_count == NUMBER_OF_STRINGS
| mit |
derricw/asciisciit | asciisciit/conversions.py | 1 | 9470 | """
conversions.py
@author: derricw
Conversion functions.
"""
from bisect import bisect
import random
import os
from PIL import Image, ImageOps, ImageDraw, ImageFont
import numpy as np
import cv2
import imageio
from asciisciit.misc import *
from asciisciit.lut import get_lut, relative_width
DEFAULT_ASPECT_CORRECTION_FACTOR = 6.0/11.0
RESOURCE_DIR = os.path.join(os.path.dirname(__file__),'res')
def image_to_ascii(img,
scalefactor=0.2,
invert=False,
equalize=True,
lut='simple',
aspect_correction_factor=None):
"""
    Generates an ascii string from an image of some kind.
Parameters
----------
img : str, ndarray, PIL.Image
Image to convert
scalefactor : float
ASCII chars per pixel
invert : bool
Invert luminance?
equalize : bool
Equalize histogram?
Returns
-------
str
Examples
--------
>>> ascii_img = image_to_ascii("http://i.imgur.com/l2FU2J0.jpg", scalefactor=0.3)
>>> print(ascii_img)
"""
if type(img) == str:
img = open_pil_img(img)
elif type(img) == np.ndarray:
img = numpy_to_pil(img)
try:
text = pil_to_ascii(img, scalefactor, invert, equalize, lut,
aspect_correction_factor)
except:
raise TypeError("That image type doesn't work. Try PIL, Numpy, or file path...")
return text
def pil_to_ascii(img,
scalefactor=0.2,
invert=False,
equalize=True,
lut='simple',
aspect_correction_factor=None
):
"""
Generates an ascii string from a PIL image.
Parameters
----------
img : PIL.Image
PIL image to transform.
scalefactor : float
ASCII characters per pixel.
invert : bool
Invert luminance?
equalize : bool
equalize histogram (for best results do this).
lut : str
Name of the lookup table to use. Currently supports 'simple' and
'binary'.
Returns
-------
str
Examples
--------
>>> from asciisciit.misc import open_pil_img
>>> img = open_pil_img("http://i.imgur.com/l2FU2J0.jpg")
>>> text_img = pil_to_ascii(img, scalefactor=0.3)
>>> print(text_img)
>>> from PIL import Image
>>> img = Image.open("some_image.png")
>>> text_img = pil_to_ascii(img)
>>> print(text_img)
"""
lookup = get_lut(lut)
if aspect_correction_factor is None:
aspect_correction_factor = get_aspect_correction_factor(lookup.exemplar)
img = img.resize(
(int(img.size[0]*scalefactor),
int(img.size[1]*scalefactor*aspect_correction_factor)),
Image.BILINEAR)
img = img.convert("L") # convert to mono
if equalize:
img = ImageOps.equalize(img)
if invert:
img = ImageOps.invert(img)
img = np.array(img, dtype=np.uint8)
return u"\n" + u"".join(lookup.apply(img).flatten().tolist())
def ascii_to_pil(text, font_size=10, bg_color=(20, 20, 20),
fg_color=(255, 255, 255), font_path=None):
"""
Renders Ascii text to an Image of the appropriate size, using text of the
specified font size.
Parameters
----------
text : str
Ascii text to render.
font_size : int (10)
Font size for rendered image.
bg_color : tuple (20,20,20)
(R,G,B) values for image background.
fg_color : tuple (255,255,255)
(R,G,B) values for text color. -1 gets value from image.
font_path : str
Use a custom font .ttf file.
Returns
-------
PIL.Image
Examples
--------
>>> ascii = AsciiImage("http://i.imgur.com/l2FU2J0.jpg", scalefactor=0.4)
>>> pil = ascii_to_pil(ascii.data)
>>> pil.show()
"""
font = get_font(font_path, font_size)
if relative_width(text[1]) == 2:
font_width, font_height = font.getsize(u"\u3000")
else:
font_width, font_height = font.getsize(u" ")
img_height, img_width = get_ascii_image_size(text)
y_padding = 1
out_img = np.zeros(((font_height+y_padding)*img_height,
font_width*img_width,
3),
dtype=np.uint8)
out_img[:, :, 0] += bg_color[0]
out_img[:, :, 1] += bg_color[1]
out_img[:, :, 2] += bg_color[2]
img = Image.fromarray(out_img)
draw = ImageDraw.Draw(img)
for index, line in enumerate(text.split("\n")):
y = (font_height+y_padding)*index
draw.text((0, y), line, fg_color, font=font)
return img
def ascii_seq_to_gif(seq, output_path, fps=15.0, font_size=10,
font_path=None):
""" Creates a gif from a sequence of ascii images.
Parameters
----------
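    seq : iterable
        Sequence of ascii frames to render; each item may be a raw ascii
        string or an object with a ``data`` attribute (e.g. an AsciiImage).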
output_path : str
Path for gif output.
fps : float
FPS for gif playback.
font_size : int
Font size for ascii.
"""
images = []
status = StatusBar(len(seq), text="Generating frames: ",)
for index, ascii_img in enumerate(seq):
if type(ascii_img) == str:
#raw text
text = ascii_img
else:
#AsciiImage instance
text = ascii_img.data
images.append(
ascii_to_pil(text,
font_size=font_size,
font_path=font_path
)
)
status.update(index)
status.complete()
duration = 1.0/fps
images_np = [np.array(img) for img in images]
imageio.mimsave(output_path, images_np, duration=duration)
def numpy_to_ascii(img,
scalefactor=0.2,
invert=False,
equalize=True,
lut="simple",
aspect_correction_factor=None):
"""
Generates an ascii string from a numpy image.
Parameters
----------
img : ndarray
        Numpy image array to transform.
scalefactor : float
ASCII characters per pixel.
invert : bool
Invert luminance?
equalize : bool
equalize histogram (for best results do this).
lut : str
Name of the lookup table to use. Currently supports 'simple' and
'binary'.
Returns
-------
str
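    Examples
    --------
    Illustrative sketch on a synthetic grayscale frame:
    >>> frame = np.random.randint(0, 255, (120, 160)).astype(np.uint8)
    >>> print(numpy_to_ascii(frame, scalefactor=0.1, lut="binary"))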
"""
lookup = get_lut(lut)
if aspect_correction_factor is None:
aspect_correction_factor = get_aspect_correction_factor(lookup.exemplar)
    h, w = img.shape[:2]  # handle both grayscale and RGB frames
img = cv2.resize(
img,
(
int(w*scalefactor),
int(h*scalefactor*aspect_correction_factor)
)
)
if img.ndim == 3: # weak check for RGB
# works in opencv 3.4.3 but who knows, they keep moving/renaming stuff
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
if equalize:
img=cv2.equalizeHist(img)
if invert:
img = 255-img
return u"\n" + u"".join(lookup.apply(img).flatten().tolist())
def image_to_numpy(path):
"""
Image file to numpy matrix.
"""
img = open_pil_img(path)
return np.array(img, dtype=np.uint8)
def numpy_to_pil(nparray):
"""
Numpy matrix to PIL Image.
"""
return Image.fromarray(nparray)
def get_font(font_path=None, font_size=10):
if not font_path:
font_path = os.path.join(RESOURCE_DIR, "Cousine-Regular.ttf")
return ImageFont.truetype(font_path, font_size)
def get_aspect_correction_factor(exemplar, font_path=None, font_size=10):
if font_path is None:
factor = relative_width(exemplar)*DEFAULT_ASPECT_CORRECTION_FACTOR
else:
font = get_font(font_path, font_size)
width, height = font.getsize(exemplar)
factor = float(width) / height
return factor
def gif_to_numpy(gif_path):
"""
Converts a GIF into a numpy movie.
"""
gif = open_pil_img(gif_path)
if hasattr(gif, 'info'):
frame_duration = gif.info.get('duration', None)
else:
frame_duration = None
length = get_length_of_gif(gif)
size = gif.size
status = StatusBar(length, "Reading frames: ")
frames = []
frame_count = 0
while gif:
new_img = Image.new("RGBA",size)
new_img.paste(gif)
frames.append(new_img)
frame_count += 1
try:
gif.seek(frame_count)
except EOFError:
break
status.update(frame_count)
status.complete()
assert(length==len(frames))
final_frame_count = len(frames)
frame1 = np.array(frames[0])
shape = frame1.shape
matrix = np.zeros((final_frame_count,shape[0],shape[1],shape[2]), dtype=np.uint8)
for i, frame in enumerate(frames):
img = np.asarray(frame)
matrix[i] = img
return matrix, frame_duration
def figure_to_numpy(mpl_figure):
"""
Converts a matplotlib figure to numpy matrix.
"""
mpl_figure.tight_layout(pad=0.1)
mpl_figure.canvas.draw()
data = np.fromstring(mpl_figure.canvas.tostring_rgb(), dtype=np.uint8, sep='')
data = data.reshape(mpl_figure.canvas.get_width_height()[::-1]+ (3,))
return data
def figure_to_ascii(mpl_figure):
"""
Converts a matplotlib figure to ascii image.
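    Example (illustrative; assumes a backend whose canvas supports
    ``tostring_rgb``, such as Agg):
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> _ = ax.plot(range(10))
    >>> print(figure_to_ascii(fig))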
"""
npy_fig = figure_to_numpy(mpl_figure)
return image_to_ascii(npy_fig, scalefactor=0.15, invert=False, equalize=False)
if __name__ == '__main__':
pass
| mit |
Tom94/tev | scripts/turbo_colormap.py | 2 | 8223 | # Copyright 2019 Google LLC.
# SPDX-License-Identifier: Apache-2.0
# Author: Anton Mikhailov
turbo_colormap_data = [[0.18995,0.07176,0.23217],[0.19483,0.08339,0.26149],[0.19956,0.09498,0.29024],[0.20415,0.10652,0.31844],[0.20860,0.11802,0.34607],[0.21291,0.12947,0.37314],[0.21708,0.14087,0.39964],[0.22111,0.15223,0.42558],[0.22500,0.16354,0.45096],[0.22875,0.17481,0.47578],[0.23236,0.18603,0.50004],[0.23582,0.19720,0.52373],[0.23915,0.20833,0.54686],[0.24234,0.21941,0.56942],[0.24539,0.23044,0.59142],[0.24830,0.24143,0.61286],[0.25107,0.25237,0.63374],[0.25369,0.26327,0.65406],[0.25618,0.27412,0.67381],[0.25853,0.28492,0.69300],[0.26074,0.29568,0.71162],[0.26280,0.30639,0.72968],[0.26473,0.31706,0.74718],[0.26652,0.32768,0.76412],[0.26816,0.33825,0.78050],[0.26967,0.34878,0.79631],[0.27103,0.35926,0.81156],[0.27226,0.36970,0.82624],[0.27334,0.38008,0.84037],[0.27429,0.39043,0.85393],[0.27509,0.40072,0.86692],[0.27576,0.41097,0.87936],[0.27628,0.42118,0.89123],[0.27667,0.43134,0.90254],[0.27691,0.44145,0.91328],[0.27701,0.45152,0.92347],[0.27698,0.46153,0.93309],[0.27680,0.47151,0.94214],[0.27648,0.48144,0.95064],[0.27603,0.49132,0.95857],[0.27543,0.50115,0.96594],[0.27469,0.51094,0.97275],[0.27381,0.52069,0.97899],[0.27273,0.53040,0.98461],[0.27106,0.54015,0.98930],[0.26878,0.54995,0.99303],[0.26592,0.55979,0.99583],[0.26252,0.56967,0.99773],[0.25862,0.57958,0.99876],[0.25425,0.58950,0.99896],[0.24946,0.59943,0.99835],[0.24427,0.60937,0.99697],[0.23874,0.61931,0.99485],[0.23288,0.62923,0.99202],[0.22676,0.63913,0.98851],[0.22039,0.64901,0.98436],[0.21382,0.65886,0.97959],[0.20708,0.66866,0.97423],[0.20021,0.67842,0.96833],[0.19326,0.68812,0.96190],[0.18625,0.69775,0.95498],[0.17923,0.70732,0.94761],[0.17223,0.71680,0.93981],[0.16529,0.72620,0.93161],[0.15844,0.73551,0.92305],[0.15173,0.74472,0.91416],[0.14519,0.75381,0.90496],[0.13886,0.76279,0.89550],[0.13278,0.77165,0.88580],[0.12698,0.78037,0.87590],[0.12151,0.78896,0.86581],[0.11639,0.79740,0.85559],[0.11167,0.80569,0.84525],[0.10738,0.81381,0.83484],[0.10357,0.82177,0.82437],[0.10026,0.82955,0.81389],[0.09750,0.83714,0.80342],[0.09532,0.84455,0.79299],[0.09377,0.85175,0.78264],[0.09287,0.85875,0.77240],[0.09267,0.86554,0.76230],[0.09320,0.87211,0.75237],[0.09451,0.87844,0.74265],[0.09662,0.88454,0.73316],[0.09958,0.89040,0.72393],[0.10342,0.89600,0.71500],[0.10815,0.90142,0.70599],[0.11374,0.90673,0.69651],[0.12014,0.91193,0.68660],[0.12733,0.91701,0.67627],[0.13526,0.92197,0.66556],[0.14391,0.92680,0.65448],[0.15323,0.93151,0.64308],[0.16319,0.93609,0.63137],[0.17377,0.94053,0.61938],[0.18491,0.94484,0.60713],[0.19659,0.94901,0.59466],[0.20877,0.95304,0.58199],[0.22142,0.95692,0.56914],[0.23449,0.96065,0.55614],[0.24797,0.96423,0.54303],[0.26180,0.96765,0.52981],[0.27597,0.97092,0.51653],[0.29042,0.97403,0.50321],[0.30513,0.97697,0.48987],[0.32006,0.97974,0.47654],[0.33517,0.98234,0.46325],[0.35043,0.98477,0.45002],[0.36581,0.98702,0.43688],[0.38127,0.98909,0.42386],[0.39678,0.99098,0.41098],[0.41229,0.99268,0.39826],[0.42778,0.99419,0.38575],[0.44321,0.99551,0.37345],[0.45854,0.99663,0.36140],[0.47375,0.99755,0.34963],[0.48879,0.99828,0.33816],[0.50362,0.99879,0.32701],[0.51822,0.99910,0.31622],[0.53255,0.99919,0.30581],[0.54658,0.99907,0.29581],[0.56026,0.99873,0.28623],[0.57357,0.99817,0.27712],[0.58646,0.99739,0.26849],[0.59891,0.99638,0.26038],[0.61088,0.99514,0.25280],[0.62233,0.99366,0.24579],[0.63323,0.99195,0.23937],[0.64362,0.98999,0.23356],[0.65394,0.98775,0.22835],[0.66428,0.98524,0.22370],[0.67462,0.98246,0.21960],[0.68494,0.97941,0.21602],[0.69525,0.97610,0.21294],[0.70553,0.97255,0.21032],[0.71577,0.96875,0.208
15],[0.72596,0.96470,0.20640],[0.73610,0.96043,0.20504],[0.74617,0.95593,0.20406],[0.75617,0.95121,0.20343],[0.76608,0.94627,0.20311],[0.77591,0.94113,0.20310],[0.78563,0.93579,0.20336],[0.79524,0.93025,0.20386],[0.80473,0.92452,0.20459],[0.81410,0.91861,0.20552],[0.82333,0.91253,0.20663],[0.83241,0.90627,0.20788],[0.84133,0.89986,0.20926],[0.85010,0.89328,0.21074],[0.85868,0.88655,0.21230],[0.86709,0.87968,0.21391],[0.87530,0.87267,0.21555],[0.88331,0.86553,0.21719],[0.89112,0.85826,0.21880],[0.89870,0.85087,0.22038],[0.90605,0.84337,0.22188],[0.91317,0.83576,0.22328],[0.92004,0.82806,0.22456],[0.92666,0.82025,0.22570],[0.93301,0.81236,0.22667],[0.93909,0.80439,0.22744],[0.94489,0.79634,0.22800],[0.95039,0.78823,0.22831],[0.95560,0.78005,0.22836],[0.96049,0.77181,0.22811],[0.96507,0.76352,0.22754],[0.96931,0.75519,0.22663],[0.97323,0.74682,0.22536],[0.97679,0.73842,0.22369],[0.98000,0.73000,0.22161],[0.98289,0.72140,0.21918],[0.98549,0.71250,0.21650],[0.98781,0.70330,0.21358],[0.98986,0.69382,0.21043],[0.99163,0.68408,0.20706],[0.99314,0.67408,0.20348],[0.99438,0.66386,0.19971],[0.99535,0.65341,0.19577],[0.99607,0.64277,0.19165],[0.99654,0.63193,0.18738],[0.99675,0.62093,0.18297],[0.99672,0.60977,0.17842],[0.99644,0.59846,0.17376],[0.99593,0.58703,0.16899],[0.99517,0.57549,0.16412],[0.99419,0.56386,0.15918],[0.99297,0.55214,0.15417],[0.99153,0.54036,0.14910],[0.98987,0.52854,0.14398],[0.98799,0.51667,0.13883],[0.98590,0.50479,0.13367],[0.98360,0.49291,0.12849],[0.98108,0.48104,0.12332],[0.97837,0.46920,0.11817],[0.97545,0.45740,0.11305],[0.97234,0.44565,0.10797],[0.96904,0.43399,0.10294],[0.96555,0.42241,0.09798],[0.96187,0.41093,0.09310],[0.95801,0.39958,0.08831],[0.95398,0.38836,0.08362],[0.94977,0.37729,0.07905],[0.94538,0.36638,0.07461],[0.94084,0.35566,0.07031],[0.93612,0.34513,0.06616],[0.93125,0.33482,0.06218],[0.92623,0.32473,0.05837],[0.92105,0.31489,0.05475],[0.91572,0.30530,0.05134],[0.91024,0.29599,0.04814],[0.90463,0.28696,0.04516],[0.89888,0.27824,0.04243],[0.89298,0.26981,0.03993],[0.88691,0.26152,0.03753],[0.88066,0.25334,0.03521],[0.87422,0.24526,0.03297],[0.86760,0.23730,0.03082],[0.86079,0.22945,0.02875],[0.85380,0.22170,0.02677],[0.84662,0.21407,0.02487],[0.83926,0.20654,0.02305],[0.83172,0.19912,0.02131],[0.82399,0.19182,0.01966],[0.81608,0.18462,0.01809],[0.80799,0.17753,0.01660],[0.79971,0.17055,0.01520],[0.79125,0.16368,0.01387],[0.78260,0.15693,0.01264],[0.77377,0.15028,0.01148],[0.76476,0.14374,0.01041],[0.75556,0.13731,0.00942],[0.74617,0.13098,0.00851],[0.73661,0.12477,0.00769],[0.72686,0.11867,0.00695],[0.71692,0.11268,0.00629],[0.70680,0.10680,0.00571],[0.69650,0.10102,0.00522],[0.68602,0.09536,0.00481],[0.67535,0.08980,0.00449],[0.66449,0.08436,0.00424],[0.65345,0.07902,0.00408],[0.64223,0.07380,0.00401],[0.63082,0.06868,0.00401],[0.61923,0.06367,0.00410],[0.60746,0.05878,0.00427],[0.59550,0.05399,0.00453],[0.58336,0.04931,0.00486],[0.57103,0.04474,0.00529],[0.55852,0.04028,0.00579],[0.54583,0.03593,0.00638],[0.53295,0.03169,0.00705],[0.51989,0.02756,0.00780],[0.50664,0.02354,0.00863],[0.49321,0.01963,0.00955],[0.47960,0.01583,0.01055]]
# The look-up table contains 256 entries. Each entry is a floating point sRGB triplet.
# To use it with matplotlib, pass cmap=ListedColormap(turbo_colormap_data) as an arg to imshow() (don't forget "from matplotlib.colors import ListedColormap").
# If you have a typical 8-bit greyscale image, you can use the 8-bit value to index into this LUT directly.
# The floating point color values can be converted to 8-bit sRGB via multiplying by 255 and casting/flooring to an integer. Saturation should not be required for IEEE-754 compliant arithmetic.
# If you have a floating point value in the range [0,1], you can use interpolate() to linearly interpolate between the entries.
# If you have 16-bit or 32-bit integer values, convert them to floating point values on the [0,1] range and then use interpolate(). Doing the interpolation in floating point will reduce banding.
# If some of your values may lie outside the [0,1] range, use interpolate_or_clip() to highlight them.
def interpolate(colormap, x):
x = max(0.0, min(1.0, x))
a = int(x*255.0)
b = min(255, a + 1)
f = x*255.0 - a
return [colormap[a][0] + (colormap[b][0] - colormap[a][0]) * f,
colormap[a][1] + (colormap[b][1] - colormap[a][1]) * f,
colormap[a][2] + (colormap[b][2] - colormap[a][2]) * f]
def interpolate_or_clip(colormap, x):
if x < 0.0: return [0.0, 0.0, 0.0]
elif x > 1.0: return [1.0, 1.0, 1.0]
else: return interpolate(colormap, x)
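# Minimal usage sketch (an added illustration, not part of the original file):
# exercises interpolate()/interpolate_or_clip() and the 8-bit sRGB conversion
# described in the comments above. Runs only when this file is executed directly.
if __name__ == "__main__":
  rgb = interpolate(turbo_colormap_data, 0.25)
  print(rgb, [int(c * 255) for c in rgb])                # float triplet and 8-bit sRGB
  print(interpolate_or_clip(turbo_colormap_data, 1.5))   # out-of-range clips to white
  try:
    from matplotlib.colors import ListedColormap         # optional, for imshow(cmap=...)
    cmap = ListedColormap(turbo_colormap_data)
  except ImportError:
    pass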
| bsd-3-clause |