repo_name (stringlengths 6-92) | path (stringlengths 4-191) | copies (stringclasses, 322 values) | size (stringlengths 4-6) | content (stringlengths 821-753k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
LukeC92/iris | lib/iris/tests/unit/plot/test_contour.py | 11 | 2995 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.plot.contour` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
iplt.contour(self.cube, coords=('bar', 'str_coord'))
self.assertPointsTickLabels('yaxis')
def test_xaxis_labels(self):
iplt.contour(self.cube, coords=('str_coord', 'bar'))
self.assertPointsTickLabels('xaxis')
def test_yaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
iplt.contour(self.cube, axes=ax, coords=('bar', 'str_coord'))
plt.close(fig)
self.assertPointsTickLabels('yaxis', ax)
def test_xaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
iplt.contour(self.cube, axes=ax, coords=('str_coord', 'bar'))
plt.close(fig)
self.assertPointsTickLabels('xaxis', ax)
def test_geoaxes_exception(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
self.assertRaises(TypeError, iplt.contour, self.lat_lon_cube, axes=ax)
plt.close(fig)
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
def setUp(self):
# We have a 2d cube with dimensionality (bar: 3; foo: 4)
self.cube = simple_2d(with_bounds=False)
self.foo = self.cube.coord('foo').points
self.foo_index = np.arange(self.foo.size)
self.bar = self.cube.coord('bar').points
self.bar_index = np.arange(self.bar.size)
self.data = self.cube.data
self.dataT = self.data.T
self.mpl_patch = self.patch('matplotlib.pyplot.contour')
self.draw_func = iplt.contour
if __name__ == "__main__":
tests.main()
| lgpl-3.0 |
chi-hung/PythonTutorial | code_examples/amzRevs_crawler.py | 2 | 10384 | #!/usr/local/bin/python3
import requests
import re
import datetime
import random
from time import sleep
import bs4
from bs4 import BeautifulSoup
import pandas as pd
from pandas import Series, DataFrame
import sqlalchemy
from sqlalchemy import create_engine,Table,Column,Integer,String,MetaData,ForeignKey,Date,update
import warnings
import multiprocessing as mp
class ReviewsExtracter:
def Authors(self,soup,ProdId):
# Extract the reviewer names & product IDs
list_author=[]
list_prodid=[]
rAuthors=soup.select('div[data-hook="review"]')
for author in rAuthors:
ath=author.div.next_sibling.span.get_text("|",strip=True).split("|")
if(len(ath)==1):
list_author.append(ath[0])
else:
list_author.append(ath[1])
list_prodid.append(ProdId)
return list_author,list_prodid
def Stars(self,soup):
# Extract the star ratings
list_star=[]
Rstars=soup.select("i[data-hook='review-star-rating']")
for star in Rstars:
star=re.sub('[a-zA-Z].+s','',star.text).split(' ')[0][0]
fStar=int(star)
list_star.append(fStar)
return list_star
def Dates(self,soup):
# Extract the review dates
list_date=[]
rDates=soup.select("span[data-hook='review-date']")
for date in rDates:
date2=(date.text)[3:]
date3=datetime.datetime.strptime(date2, '%B %d, %Y').strftime('%Y-%m-%d')
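# e.g. a raw date string like 'on January 5, 2017' (assuming the usual amazon.com wording) becomes 'January 5, 2017' after the slice above and '2017-01-05' after reformatting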
list_date.append(date3)
return list_date
def Title(self,soup):
# Extract the review titles
list_title=[]
rtitle=soup.select("a[data-hook='review-title']")
for title in rtitle:
list_title.append(title.text)
return list_title
def Reviews(self,soup):
# Extract the review text
list_review=[]
reviews=soup.select("span[class='a-size-base review-text']")
for review in reviews:
list_review.append(review.get_text(separator="\n\n",strip=True))
return list_review
def Verifieds(self,soup):
# Extract the verified-purchase flags
list_verified=[]
rVerifieds=soup.select('div[class="a-row a-spacing-mini review-data review-format-strip"]')
a=0
for verified in rVerifieds :
if 'Verified' in verified.text:
ver=1
a+=1
else:
ver=0
a+=1
list_verified.append(ver)
return list_verified
def Comments(self,soup):
# Extract the number of comments on each review
list_comment=[]
rcomments = soup.select('span[class="review-comment-total aok-hidden"]')
for comment in rcomments:
list_comment.append(comment.text)
return list_comment
def Helps(self,soup):
# Extract the number of people who found the review helpful
list_helps=[]
tagsHelps=soup.select('span[class="cr-vote-buttons"] > span[class="a-color-secondary"]')
idx=0
for helps in tagsHelps:
if "One" in helps.text:
NumPeopleFindHelpful=1
elif (helps.span==None):
NumPeopleFindHelpful=0
else:
NumPeopleFindHelpful=int(re.sub('[^0-9]', '',(helps.text)))
idx+=1
list_helps.append(NumPeopleFindHelpful)
return list_helps
def Crawler(self,ProdId,ProdName,totalNumReviews,maxretrytime=60):
"""
This function takes ProdId, ProdName and the total number of reviews as input,
and returns a table containing the Amazon reviews and related information.
"""
url_base ="https://www.amazon.com/"
url_01 = "/product-reviews/"
url_02="/ref=cm_cr_arp_d_paging_btm_1?pageNumber="
url_03="&reviewerType=all_reviews&pageSize=50"
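# For reference (placeholders, not values from the data): the pieces above assemble into a review-page URL of the form
# https://www.amazon.com/<ProdName>/product-reviews/<ProdId>/ref=cm_cr_arp_d_paging_btm_1?pageNumber=<n>&reviewerType=all_reviews&pageSize=50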
# Decide how many pages to page through
totalNumPages=int(totalNumReviews/50)+2
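# e.g. 120 total reviews at 50 reviews per page gives int(120/50)+2 = 4 requested pages, i.e. one spare page beyond the 3 actually needed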
list_prodid=[]
list_author=[]
list_star=[]
list_date=[]
list_title=[]
list_review=[]
list_verified=[]
list_comments=[]
list_helps=[]
for currentPageNum in range(1,totalNumPages+1):
print("ProdId= %s. Total number of pages= %s. Current page= %s."%(ProdId,totalNumPages,currentPageNum) )
passed=False
cnt=0
while(passed==False):
cnt+=1
if(cnt>maxretrytime):
raise Exception("Error! Tried too many times but we are still blocked by Amazon.")
print("ProdId="+ProdId+","+"CurrentPage="+currentPageNum)
try:
# Establish the connection
with requests.Session() as session:
#session.headers = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0"}
uri=url_base+ProdName+url_01+ProdId+url_02+str(currentPageNum)+url_03
r=session.get(uri)
if(r.status_code!=200):
print("Connection failed(status/=200). Reconnecting...")
sleep(0.3)
else:
# On a 200 response, parse the soup
soup = BeautifulSoup(r.content,"lxml")
#print(soup.prettify())
# If the soup turns out to be fake (a robot check), sleep briefly and fetch it again. If the number of retries exceeds maxretrytime, the program terminates.
if("Robot Check" in soup.text):
print("we are identified as a robot! Reconnecting...")
sleep(0.2+0.1*random.randint(0,1)) # 睡 0.2 或 0.3 秒
if(cnt>25):
sleep(0.5) # still failing after more than 25 retries, so sleep an extra 0.5 seconds
else:
print("We've obtained the correct soup!")
passed=True
lst_author,lst_prodid=self.Authors(soup,ProdId) # reviewers and ProdId, in two separate lists
lst_star=self.Stars(soup) # star ratings
lst_date=self.Dates(soup) # dates
lst_title=self.Title(soup) # review titles
lst_review=self.Reviews(soup) # review text
lst_verified=self.Verifieds(soup) # verified-purchase flags
lst_comments=self.Comments(soup) # comment counts
lst_helps=self.Helps(soup) # "found helpful" counts
print("URL=",uri)
lengths=[len(lst_prodid),len(lst_author),len(lst_star),len(lst_date),len(lst_title),len(lst_review),len(lst_verified),len(lst_comments),len(lst_helps)]
if(len(set(lengths))!=1):
print(lengths)
warnings.warn('Beware. Lists obtained have no equal length.')
print("length of lst_prodid=",len(lst_prodid))
print("length of lst_author=",len(lst_author))
print("length of lst_star=",len(lst_star))
print("length of lst_date=",len(lst_date))
print("length of lst_title=",len(lst_title))
print("length of lst_review=",len(lst_review))
print("length of lst_verified=",len(lst_verified))
print("length of lst_comments=",len(lst_comments))
print("length of lst_helps=",len(lst_helps))
except:
print("Error encounted! ProdId= "+ProdId+". "+"Current Page= "+str(currentPageNum))
print("The error is probably caused by connection time out? Reconnecting...")
sleep(0.3)
list_prodid+=lst_prodid
list_author+=lst_author
list_star+=lst_star
list_date+=lst_date
list_title+=lst_title
list_review+=lst_review
list_verified+=lst_verified
list_comments+=lst_comments
list_helps+=lst_helps
df=pd.DataFrame.from_items([("pindex",list_prodid),("author",list_author),("star",list_star),\
("date",list_date),("title",list_title),("review",list_review), \
("verified",list_verified),("comment",list_comments),("help",list_helps)])\
.drop_duplicates("review").reset_index(drop=True)
return df
def prodInfoFetcherForCrawler(self,thisCrawlerID,prodType):
"""
The crawler needs to know which items still have unfetched reviews and where to find their product pages.
This method fetches that necessary information for the crawler.
"""
prodTypes=["central","canister","handheld","robotic","stick","upright","wetdry"]
engine=create_engine("mysql+pymysql://semantic:[email protected]:13606/semantic?charset=utf8mb4",echo=False, encoding='utf-8')
conn = engine.connect()
sql_command = "SELECT pindex,pname,totalRev,cID,cStatus FROM semantic.amzProd where type='"+ prodType +"' \
and cStatus!=1 and cID="+str(thisCrawlerID)+" ORDER BY totalRev desc"
resultSet = pd.read_sql_query(sql=sql_command, con=conn, coerce_float=False)
conn.close()
return resultSet
def prodRevstoSQL(self,ProdId,resultTable):
"""
this method will upload the fetched customer reviews of a single product to the SQL server
"""
prodTypes=["central","canister","handheld","robotic","stick","upright","wetdry"]
# prepare the connection and connect to the DB
engine=create_engine("mysql+pymysql://semantic:[email protected]:13606/semantic?charset=utf8mb4",convert_unicode=True,echo=False)
conn = engine.connect()
resultTable.to_sql(name='amzRev', con=conn, if_exists = 'append', index=False)
sql_command = "UPDATE semantic.amzProd SET cStatus=1 where pindex='"+ ProdId +"'"
result = conn.execute(sql_command)
# close the connection
conn.close()
def run(self,begin,end,incr,resultSet,nRows,nCols):
"""
Fetch the customer reviews of every product assigned to this worker
(rows begin, begin+incr, ... of resultSet) and upload them to the SQL server.
"""
for j in range(begin,end,incr):
print("this is item %i of %i items"%(j+1,nRows))
ProdId,ProdName,NumReviews=resultSet.loc[j,["pindex","pname","totalRev"]]
print(j+1,ProdId,ProdName,NumReviews)
resultTable=self.Crawler(ProdId,ProdName,NumReviews)
print("the shape of the obtained table is %s X %s \n"%(resultTable.shape[0],resultTable.shape[1]))
self.prodRevstoSQL(ProdId,resultTable)
def multiThreadedRun(self,resultSet,thisCrawlerID,nThreads):
if(resultSet.shape[0] >= nThreads):
nRows,nCols=resultSet.shape[0],resultSet.shape[1]
print("number of products to be fetched= ",nRows)
# Split the items round-robin across nThreads worker processes
for j in range(nThreads):
print("index of iterations for thread%i= "%j,*range(j,nRows,nThreads))
processes = [mp.Process(target=self.run, args=(j,nRows,nThreads,resultSet,nRows,nCols,) ) for j in range(nThreads)]
# start and run the processes
for p in processes:
p.start()
for p in processes:
p.join()
else:
print("this code stopped because number of rows= ",resultSet.shape[0])
prodTypes=["central","canister","handheld","robotic","stick","upright","wetdry"]
###########################################################################################
# INPUT PARAMETERS
prodType=prodTypes[-2] # type of vacuum cleaner to crawl
thisCrawlerID=6 # ID of this crawler instance
nThreads=1 # number of worker processes to use for crawling
###########################################################################################
extractor=ReviewsExtracter()
resultSet=extractor.prodInfoFetcherForCrawler(thisCrawlerID,prodType)
extractor.multiThreadedRun(resultSet,thisCrawlerID,nThreads)
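# A possible alternative configuration (a sketch, not part of the original run): to crawl
# the robotic category with crawler ID 3 using two worker processes, one could instead set
#     prodType = prodTypes[3]   # 'robotic'
#     thisCrawlerID = 3
#     nThreads = 2
# before constructing ReviewsExtracter() above.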
| mit |
aewhatley/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: the calibration curve has a sigmoid shape, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
ehthiede/EMUS | examples/AlaDipeptide_1D/demo.py | 1 | 3958 | # -*- coding: utf-8 -*-
"""
Example script with basic usage of the EMUS package.
The script follows the quickstart guide closely, with slight adjustments (for simplicity we have moved all plotting commands to the bottom of the script).
Please note that the demo requires matplotlib.
This is not a dependency in the emus package to keep things lightweight.
"""
import numpy as np
from emus import usutils as uu
from emus import emus, avar
import matplotlib.pyplot as plt
# Define Simulation Parameters
T = 310 # Temperature in Kelvin
k_B = 1.9872041E-3 # Boltzmann constant in kcal/(mol K)
kT = k_B * T
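# i.e. kT = 1.9872041E-3 * 310, roughly 0.616 kcal/mol at the simulation temperature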
meta_file = 'cv_meta.txt' # Path to Meta File
dim = 1 # 1 Dimensional CV space.
period = 360 # Dihedral Angles periodicity
nbins = 60 # Number of Histogram Bins.
# Load data
psis, cv_trajs, neighbors = uu.data_from_meta(
meta_file, dim, T=T, k_B=k_B, period=period)
# Calculate the partition function for each window
z, F = emus.calculate_zs(psis, neighbors=neighbors)
# Calculate error in each z value from the first iteration.
zerr, zcontribs, ztaus = avar.calc_partition_functions(
psis, z, F, iat_method='acor')
# Calculate the PMF from EMUS
domain = ((-180.0, 180.)) # Range of dihedral angle values
pmf, edges = emus.calculate_pmf(
cv_trajs, psis, domain, z, nbins=nbins, kT=kT, use_iter=False) # Calculate the pmf
# Calculate z using the MBAR iteration.
z_iter_1, F_iter_1 = emus.calculate_zs(psis, n_iter=1)
z_iter_2, F_iter_2 = emus.calculate_zs(psis, n_iter=2)
z_iter_5, F_iter_5 = emus.calculate_zs(psis, n_iter=5)
z_iter_1k, F_iter_1k = emus.calculate_zs(psis, n_iter=1000)
# Calculate new PMF
iterpmf, edges = emus.calculate_pmf(
cv_trajs, psis, domain, nbins=nbins, z=z_iter_1k, kT=kT)
# Estimate probability of being in C7 ax basin
fdata = [((traj > 25) & (traj < 100)).flatten() for traj in cv_trajs]
# Calculate the probability and perform error analysis.
iat, probC7ax, probC7ax_contribs = avar.calc_avg_ratio(
psis, z, F, fdata, iat_method='acor')
probC7ax_std = np.sqrt(np.sum(probC7ax_contribs))
# This command just calculates the probability, without error analysis.
prob_C7ax_iter = emus.calculate_obs(
psis, z_iter_1k, fdata, use_iter=True) # Just calculate the probability
avg_pmf, edges = emus.calculate_avg_on_pmf(
cv_trajs, psis, (-180, 180), z_iter_1k, fdata, use_iter=True, nbins=nbins) # Average of the indicator on the PMF
plt.plot(avg_pmf)
plt.xlabel('Dihedral Angle')
plt.ylabel('Avg of Indicator on PMF')
plt.show()
# Get the asymptotic error of each histogram bin.
pmf_av_mns, pmf_avars = avar.calc_pmf(
cv_trajs, psis, domain, z, F, nbins=nbins, kT=kT, iat_method=np.average(ztaus, axis=0))
### Data Output Section ###
# Plot the EMUS, Iterative EMUS pmfs.
pmf_centers = (edges[0][1:]+edges[0][:-1])/2.
plt.figure()
plt.errorbar(pmf_centers, pmf_av_mns, yerr=np.sqrt(
pmf_avars), label='EMUS PMF w. AVAR')
plt.plot(pmf_centers, iterpmf, label='Iter EMUS PMF')
plt.xlabel('$\psi$ dihedral angle')
plt.ylabel('Unitless FE')
plt.legend()
plt.title('EMUS and Iterative EMUS potentials of Mean Force')
plt.show()
# Plot the relative normalization constants as fxn of max iteration.
plt.errorbar(np.arange(len(z)), -np.log(z),
yerr=np.sqrt(zerr)/z, label="Iteration 0")
plt.plot(-np.log(z_iter_1), label="Iteration 1")
plt.plot(-np.log(z_iter_1k), label="Iteration 1k", linestyle='--')
plt.xlabel('Window Index')
plt.ylabel('Unitless Free Energy')
plt.title('Window Free Energies and Iter No.')
plt.legend(loc='upper left')
plt.show()
# Print the C7 ax basin probability
print("EMUS Probability of C7ax basin is %f +/- %f" % (probC7ax, probC7ax_std))
print("Iterative EMUS Probability of C7ax basin is %f" % (prob_C7ax_iter))
print("Asymptotic coefficient of variation for each partition function:")
print(np.sqrt(zerr)/z)
| mit |
Minhmo/tardis | tardis/plasma/standard_plasmas.py | 3 | 5613 | import logging
import pandas as pd
import numpy as np
from tardis.plasma import BasePlasma
from tardis.plasma.properties.property_collections import (basic_inputs,
basic_properties, lte_excitation_properties, lte_ionization_properties,
macro_atom_properties, dilute_lte_excitation_properties,
nebular_ionization_properties, non_nlte_properties,
nlte_properties, helium_nlte_properties, helium_numerical_nlte_properties)
from tardis.plasma.exceptions import PlasmaConfigError
from tardis.plasma.properties import LevelBoltzmannFactorNLTE
logger = logging.getLogger(__name__)
class LTEPlasma(BasePlasma):
def __init__(self, t_rad, abundance, density, time_explosion, atomic_data,
j_blues, link_t_rad_t_electron=0.9, delta_treatment=None):
plasma_modules = basic_inputs + basic_properties + \
lte_excitation_properties + lte_ionization_properties + \
non_nlte_properties
super(LTEPlasma, self).__init__(plasma_properties=plasma_modules,
t_rad=t_rad, abundance=abundance, atomic_data=atomic_data,
density=density, time_explosion=time_explosion, j_blues=j_blues,
w=None, link_t_rad_t_electron=link_t_rad_t_electron,
delta_input=delta_treatment, nlte_species=None)
class LegacyPlasmaArray(BasePlasma):
def from_number_densities(self, number_densities, atomic_data):
atomic_mass = atomic_data.atom_data.ix[number_densities.index].mass
elemental_density = number_densities.mul(atomic_mass,
axis='index')
density = elemental_density.sum()
abundance = pd.DataFrame(elemental_density/density,
index=number_densities.index, columns=number_densities.columns,
dtype=np.float64)
return abundance, density
def initial_t_rad(self, number_densities):
return np.ones(len(number_densities.columns)) * 10000
def initial_w(self, number_densities):
return np.ones(len(number_densities.columns)) * 0.5
def update_radiationfield(self, t_rad, ws, j_blues, nlte_config,
t_electrons=None, n_e_convergence_threshold=0.05,
initialize_nlte=False):
if nlte_config is not None and nlte_config.species:
self.store_previous_properties()
self.update(t_rad=t_rad, w=ws, j_blues=j_blues)
def __init__(self, number_densities, atomic_data, time_explosion,
t_rad=None, delta_treatment=None, nlte_config=None,
ionization_mode='lte', excitation_mode='lte',
line_interaction_type='scatter', link_t_rad_t_electron=0.9,
helium_treatment='none', heating_rate_data_file=None,
v_inner=None, v_outer=None):
plasma_modules = basic_inputs + basic_properties
if excitation_mode == 'lte':
plasma_modules += lte_excitation_properties
elif excitation_mode == 'dilute-lte':
plasma_modules += dilute_lte_excitation_properties
else:
raise NotImplementedError('Sorry {0} not implemented yet.'.format(
excitation_mode))
if ionization_mode == 'lte':
plasma_modules += lte_ionization_properties
elif ionization_mode == 'nebular':
plasma_modules += nebular_ionization_properties
else:
raise NotImplementedError('Sorry ' + ionization_mode +
' not implemented yet.')
if nlte_config is not None and nlte_config.species:
plasma_modules += nlte_properties
if nlte_config.classical_nebular==True and \
nlte_config.coronal_approximation==False:
LevelBoltzmannFactorNLTE(self, classical_nebular=True)
elif nlte_config.coronal_approximation==True and \
nlte_config.classical_nebular==False:
LevelBoltzmannFactorNLTE(self, coronal_approximation=True)
elif nlte_config.coronal_approximation==True and \
nlte_config.classical_nebular==True:
raise PlasmaConfigError('Both coronal approximation and '
'classical nebular specified in the '
'config.')
else:
plasma_modules += non_nlte_properties
if line_interaction_type in ('downbranch', 'macroatom'):
plasma_modules += macro_atom_properties
if t_rad is None:
t_rad = self.initial_t_rad(number_densities)
w = self.initial_w(number_densities)
abundance, density = self.from_number_densities(number_densities,
atomic_data)
if nlte_config is not None and nlte_config.species:
self.nlte_species = nlte_config.species
else:
self.nlte_species = None
if helium_treatment=='recomb-nlte':
plasma_modules += helium_nlte_properties
if helium_treatment=='numerical-nlte':
plasma_modules += helium_numerical_nlte_properties
if heating_rate_data_file is None:
raise PlasmaConfigError('Heating rate data file not specified')
else:
self.heating_rate_data_file = heating_rate_data_file
self.v_inner = v_inner
self.v_outer = v_outer
self.delta_treatment = delta_treatment
super(LegacyPlasmaArray, self).__init__(
plasma_properties=plasma_modules, t_rad=t_rad,
abundance=abundance, density=density,
atomic_data=atomic_data, time_explosion=time_explosion,
j_blues=None, w=w, link_t_rad_t_electron=link_t_rad_t_electron)
| bsd-3-clause |
mfjb/scikit-learn | sklearn/metrics/classification.py | 95 | 67713 | """Metrics to assess performance on classification task given class prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Jatin Shah <[email protected]>
# Saurabh Jha <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from .base import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
# We can't have more than one value on y_type => The set is no more needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
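# Rough illustration (not part of the original module): _check_targets([0, 1, 1], [0, 1, 0])
# returns ('binary', array([0, 1, 1]), array([0, 1, 0])), whereas mixing a multilabel
# indicator matrix with a multiclass vector raises the ValueError above.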
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistic 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
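# Worked example of the kappa formula above (hypothetical numbers, not a doctest): two
# annotators who agree on 8 of 10 binary labels with chance agreement p_e = 0.5 give
# kappa = (0.8 - 0.5) / (1 - 0.5) = 0.6.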
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
prediction, 0 an average random prediction and -1 and inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
with np.errstate(invalid='ignore'):
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta: float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision: float (if average is not None) or array of float, shape =\
[n_unique_labels]
recall: float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score: float (if average is not None) or array of float, shape =\
[n_unique_labels]
support: int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
### Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
### Finally, we have all our sufficient statistics. Divide! ###
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
## Average the results ##
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
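# Illustrative helper (not part of scikit-learn's API): a minimal sketch of the
# sufficient-statistic approach used above -- per-class tp/pred/true counts via
# bincount, followed by one division per metric. The toy labels are an
# assumption and classes are encoded as 0..n_classes-1.
def _prf_from_counts_demo():
    y_true = np.array([0, 1, 2, 0, 1, 2])
    y_pred = np.array([0, 2, 1, 0, 0, 1])
    n_classes = 3
    # true positives per class: count the true labels where prediction agrees
    tp_sum = np.bincount(y_true[y_true == y_pred], minlength=n_classes)
    pred_sum = np.bincount(y_pred, minlength=n_classes)   # predicted positives
    true_sum = np.bincount(y_true, minlength=n_classes)   # actual positives
    precision = tp_sum / pred_sum.astype(float)           # ~[0.67, 0., 0.]
    recall = tp_sum / true_sum.astype(float)              # ~[1., 0., 0.]
    return precision, recall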
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%s' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
    In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred`` which is equivalent to the
subset ``zero_one_loss`` function.
In multilabel classification, the Hamming loss is different from the
subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
labels. Hamming loss is more forgiving in that it penalizes the individual
labels.
The Hamming loss is upperbounded by the subset zero-one loss. When
normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred)
return (n_differences / (y_true.shape[0] * len(classes)))
elif y_type in ["binary", "multiclass"]:
return sp_hamming(y_true, y_pred)
else:
raise ValueError("{0} is not supported".format(y_type))
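# Illustrative helper (not part of scikit-learn's API): a hedged sketch showing
# that, for multilabel indicator matrices, the Hamming loss is simply the
# fraction of label entries that disagree, matching the doctest above.
def _hamming_loss_demo():
    y_true = np.array([[0, 1], [1, 1]])
    y_pred = np.zeros((2, 2))
    return np.mean(y_true != y_pred)   # 0.75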
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
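# Illustrative helper (not part of scikit-learn's API): the binary formula quoted
# in the docstring, -(yt*log(yp) + (1 - yt)*log(1 - yp)), evaluated directly.
# The probabilities below are the P(spam) column of the doctest example above.
def _binary_log_loss_demo():
    yt = np.array([1., 0., 0., 1.])           # true labels (spam=1, ham=0)
    yp = np.array([0.9, 0.1, 0.2, 0.65])      # predicted P(y=1)
    per_sample = -(yt * np.log(yp) + (1 - yt) * np.log(1 - yp))
    return per_sample.mean()                  # ~0.216, as in the doctest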
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In binary class case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multilabel margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
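# Illustrative helper (not part of scikit-learn's API): in the binary case the
# hinge loss reduces to mean(max(0, 1 - y * decision)) with labels in {-1, +1}.
# The decision values below are the rounded ones from the doctest above.
def _binary_hinge_loss_demo():
    y = np.array([-1., 1., 1.])
    decision = np.array([-2.18, 2.36, 0.09])
    losses = np.maximum(0., 1. - y * decision)
    return losses.mean()                      # ~0.30, as in the doctest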
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
    label is controlled via the parameter pos_label, which defaults to None;
    in that case the greatest label value is used as the positive class.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
http://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
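# Illustrative helper (not part of scikit-learn's API): the Brier score is just
# the mean squared difference between the predicted probability and the observed
# binary outcome, as in the doctest above.
def _brier_score_demo():
    y_true = np.array([0., 1., 1., 0.])
    y_prob = np.array([0.1, 0.9, 0.8, 0.3])
    return np.mean((y_prob - y_true) ** 2)    # ~0.0375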
| bsd-3-clause |
ArcticSnow/dempy | dempy/decomposition/fourierSpectrum.py | 1 | 13362 | import pyximport; pyximport.install()
import ds
import numpy as np
import matplotlib.pyplot as plt
import cv2
import pandas as pd
from scipy.stats import chi2
import smooth2d as sm
class fftDecomposition(object):
'''
Class to perform analysis such as the one in Perron et al. 2008
'''
def __init__(self):
self.dem = None
self.dx = None
self.dy = self.dx
self.nx = None
self.ny = None
self.FreqMat = None
self.DFTperiodogram = None
self.Power_vec = None
self.fshift = None
self.Freq_vec = None
self.DFTperiodogram_norm = None
self.Power_vec_norm = None
self.radial_power = None
self.radial_freq = None
self.dem_pad = None
def smoothing(self, kernel_size=None, ret=False):
'''
        Smooth the DEM with a square kernel and subtract the smoothed surface, leaving the detrended DEM in self.dem
        :param kernel_size: size of the square smoothing kernel (defaults to a third of the smallest DEM dimension)
        :param ret: if True, return the smoothed DEM
        :return: the smoothed DEM if ret is True
'''
if kernel_size is None:
kernel_size = np.int(np.min(self.dem.shape)/3)
kernel = sm.kernel_square(kernel_size)
self.dem_sm = sm.smooth(self.dem, kernel)
self.dem = self.dem - self.dem_sm
if ret is True:
return self.dem_sm
def fftmat(self, mat, dx=1, dy=1, pad=False, pad_window=True, openCV=True):
'''
        Function to derive the Fourier transform. Returns 2D and 1D periodograms
        :param mat: input 2D matrix
        :param dx: pixel resolution of the matrix in the x-dir
        :param dy: pixel resolution of the matrix in the y-dir
        :param pad: apply zero padding around the matrix to the next power of two. Not implemented; default is False
        :param pad_window: apply a 2D Hanning window before the transform
        :param openCV: use the openCV algorithm to calculate the FFT. Faster than numpy
        :return: Power_vec -- 1D vector of the power from FFT
                 Freq_vec -- 1D vector of the frequency
                 FreqMat -- 2D matrix of frequency
                 DFTperiodogram -- 2D matrix of periodogram
                 img_pad -- the (windowed) input matrix that was transformed
'''
nx, ny = mat.shape
# apply padding if asked
if pad_window is True:
img_pad = self.hann2d(mat)
if pad is True:
print("Needs to be implemented")
Lx = np.int(2 ** (np.ceil(np.log(np.max([nx, ny])) / np.log(2))))
Ly = Lx
img_pad = mat
else:
Lx = np.int(nx)
Ly = Lx
if (pad is False) and (pad_window is False):
Lx = np.int(nx)
Ly = Lx
img_pad = mat
print("image must be padded")
        # Frequency increments: from zero to Nyquist freq 1/(2*dx)
dfx = 1/(dx * Lx)
dfy = 1/(dy * Ly)
print('Lx=' +str(Lx))
print('Ly=' + str(Ly))
print(img_pad.shape)
# calculate the 2D FFT
if openCV:
fft = cv2.dft(np.float32(img_pad), flags=cv2.DFT_COMPLEX_OUTPUT)
fshift = np.fft.fftshift(fft)
DFTperiodogram = np.copy(cv2.magnitude(fshift[:, :, 0], fshift[:, :, 1])**2)
fft, fshift = None, None
else:
fft = np.fft.fft2(mat)
fshift = np.fft.fftshift(fft)
            # Making sure the fft of the dem is detrended as the padding might add a bias from the previously detrended dem
#fshift[np.int(Ly / 2), np.int(Lx / 2)] = 0
# derive DFT periodogram
#DFTperiodogram = np.copy(fshift * np.conj(fshift) / (Lx * Ly * Wss))
DFTperiodogram = np.copy(np.abs(fshift) ** 2)
fft, fshift = None, None
# matrix of radial frequencies
xc = np.int(Lx / 2)
yc = np.int(Ly / 2)
cols, rows = np.meshgrid(np.arange(0, Lx), np.arange(0, Ly))
FreqMat = np.sqrt((dfy * (rows - yc)) ** 2 + (dfx * (cols - xc)) ** 2)
rows, cols = None, None
Freq_vec = np.copy(FreqMat[xc:, yc:].flatten())
Power_vec = np.copy(DFTperiodogram[xc:, yc:].flatten())
# vector of sorted frequency and power
# redesign this part!!!!!!!!!!!!!!!!
# fft_part = np.copy(DFTperiodogram[:, 0:np.int(Lx / 2)])
# fmat = np.copy(FreqMat[:, 0:np.int(Lx / 2)])
# fmat[yc:Ly-1, xc-1] = -1
#
#
# fvec = np.vstack((fmat.flatten(1), fft_part.flatten(1)))
# print 'fvec shape: ' + str(fvec.shape)
# fvec= np.copy(fvec[:, fvec[0, :].argsort()])
# fvec = np.copy(fvec[:, (fvec[0, :] > 0)])
#
# # separate into power and frequency vectors
# Power_vec = 2 * fvec[1, :]
# Freq_vec = fvec[0, :]
return Power_vec, Freq_vec, FreqMat, DFTperiodogram, img_pad
def fftdem(self, pad=False, pad_window=True, openCV=True):
'''
        Function to perform fftmat() on the dem loaded into the class
        :param openCV: use the openCV algorithm to calculate the FFT. Faster than numpy
        :return: updates the class attributes in place
'''
self.nx, self.ny = self.dem.shape
self.Power_vec, self.Freq_vec, self.FreqMat, self.DFTperiodogram, self.dem_pad = self.fftmat(self.dem, self.dx, self.dy, pad=pad, pad_window=pad_window, openCV=openCV)
def hann2d(self, mat):
'''
        Apply a 2D Hanning window to a matrix
        :param mat: 2D input matrix
        :return: H -- the input matrix multiplied by the Hanning window
'''
nx, ny = mat.shape
# matrix coordinates of centroid
a = (nx + 1) / 2
b = (ny + 1) / 2
X, Y = np.meshgrid(np.arange(0, ny), np.arange(0, nx))
inter = a * (np.pi / 2) + (X != a)
theta = inter * np.arctan2((Y - b), (X - a)) # angular polar coordinate
inter = None
r = np.sqrt((Y - b) ** 2 + (X - a) ** 2) # radial polar coordinates
X, Y = None, None
# radius of ellipse for this theta
rprime = np.sqrt((a ** 2) * (b ** 2) * (b ** 2 * (np.cos(theta)) ** 2 + a ** 2 * (np.sin(theta)) ** 2) ** (-1))
theta = None
ind = (r < rprime) * 0.5
rrp = r / rprime
r, rprime = None, None
hanncoeff = ind * (1 + np.cos(np.pi * rrp))
ind = None
H = mat * hanncoeff
        hanncoeff, rrp = None, None
return H
def plot_2D_spec(self):
return
def bin_scatter(self,x,y, nbins=10):
'''
        Function to bin Y data based on X
:param x: 1D vector
:param y: 1D vector
:param nbins: number of evenly spaced bins
:return: a dataframe with various stats of Y for each bin
'''
df = pd.DataFrame(np.transpose([x,y]))
df.columns = ['freq', 'power']
freq_cut, bins = pd.cut(df.freq, nbins, retbins=True)
binned = df.groupby(freq_cut)
spect1D_bin = pd.DataFrame()
spect1D_bin['freq'] = [(a + b) / 2 for a, b in zip(bins[:-1], bins[1:])]
spect1D_bin['power_min'] = binned.power.min().as_matrix()
spect1D_bin['power_max'] = binned.power.max().as_matrix()
spect1D_bin['power_mean'] = binned.power.mean().as_matrix()
spect1D_bin['power_std'] = binned.power.std().as_matrix()
return spect1D_bin
def plot_1D_spec(self,x, y, nbins=10, errorbar=False, bin_only=True):
# Create figure
plt.figure()
if bin_only is False:
plt.scatter(x, y)
scat_bin = self.bin_scatter(x,y,nbins=nbins)
plt.plot(scat_bin.freq, scat_bin.power_mean,color='k')
plt.scatter(scat_bin.freq, scat_bin.power_mean, s=50, c='k')
plt.yscale('log')
plt.xscale('log')
plt.xlabel('Frequency [1/m]')
plt.ylabel('DFT mean squared amplitude')
plt.title('Periodogram')
if errorbar:
plt.errorbar(scat_bin.freq, scat_bin.power_mean, yerr=scat_bin.power_std/2, color='k')
def plot_1D_specDEM(self, nbins=10, errorbars=False):
self.plot_1D_spec(self.Freq_vec, self.Power_vec, nbins=nbins, errorbar=errorbars)
def plot_1D_specNORM(self, nbins=10, errorbars=False):
self.plot_1D_spec(self.Freq_vec, self.Power_vec_norm, nbins=nbins, errorbar=errorbars)
plt.title('Normalized periodogram')
def normalized_spect(self, H, Zrange=1, nSynth=20, demVar=1):
'''
        Function to perform the normalization as in Perron et al. 2008 using the diamond square algorithm
:param H: roughness parameter. 0<H<1
:param Zrange: Elevation range of the synthetic terrain model
:param nSynth: number of terrain model to simulate for deriving normalization
:param demVar: Variance of the original DEM
:return: return within the class variable self.DFTperiodogram_norm, and self.Power_vec_norm
'''
for i in range(1, nSynth+1):
print('Synthetic surface # ' + str(i) + ' of ' + str(nSynth))
synthDEM = ds.diamondSquare(self.nx, self.ny, Zrange, H)
synthDEM = synthDEM * np.sqrt(demVar)/np.std(synthDEM)
Pvec, fvec, freqmat, Pmat, dem_pad = self.fftmat(synthDEM, dx=self.dx, dy=self.dy, pad_window=True)
if i == 1:
P = Pvec
Pm = Pmat
else:
P = P + Pvec
Pm = Pm + Pmat
P = P / nSynth
Pm = Pm / nSynth
# scale the average spectra so they have total power = var. This step is
# necessary because the average of N spectra, each of which has total power
        # X, will not necessarily have total power = X.
P = P*demVar/np.nansum(P)
Pm = Pm*demVar/np.nansum(Pm)
if self.DFTperiodogram is None:
raise ValueError('run fftdem() first to evaluate DEMs transform')
self.DFTperiodogram_norm = self.DFTperiodogram / Pm
self.Power_vec_norm = self.Power_vec / P
        # estimate misfit between the synthetic dems and the original one. This RMSE should be minimized by tuning H
bin_spec = self.bin_scatter(self.Freq_vec, self.Power_vec, nbins=15)
bin_spec_normed = self.bin_scatter(self.Freq_vec, P, nbins=15)
self.synth_rmse = np.sqrt(np.mean((bin_spec.power_mean - bin_spec_normed.power_mean)**2))
print('rmse = ' + str(self.synth_rmse))
def azimuthalAverage(self, center=None, ret=False):
'''
        Calculate the azimuthally averaged radial profile of self.DFTperiodogram.
        center - The [x,y] pixel coordinates used as the center. The default is
        None, which then uses the center of self.DFTperiodogram (including
        fractional pixels).
code from: http://www.astrobetter.com/wiki/tiki-index.php?page=python_radial_profiles
'''
# Calculate the indices from the self.magnitude_spectrum
if self.DFTperiodogram is None:
raise ValueError('You must run self.fftdem()')
y, x = np.indices(self.DFTperiodogram.shape)
if not center:
            center = np.array([(x.max() + 1 - x.min()) / 2.0, (y.max() + 1 - y.min()) / 2.0])
print(center)
r = np.hypot(x - center[0], y - center[1])
x, y =None, None
# Get sorted radii
ind = np.argsort(r.flat)
r_sorted = r.flat[ind]
i_sorted = self.DFTperiodogram.flat[ind]
ind = None
# Get the integer part of the radii (bin size = 1)
r_int = r_sorted.astype(int)
r_sorted = None
# Find all pixels that fall within each radial bin.
deltar = r_int[1:] - r_int[:-1] # Assumes all radii represented
rind = np.where(deltar)[0] # location of changed radius
deltar = None
nr = rind[1:] - rind[:-1] # number of radius bin
# Cumulative sum to figure out sums for each radius bin
csim = np.cumsum(i_sorted, dtype=float)
tbin = csim[rind[1:]] - csim[rind[:-1]]
csim, rind, i_sorted = None, None, None
self.radial_power = tbin / nr
#self.radial_freq = (1/self.dx)*(np.linspace(0, self.radial_power.__len__()/2-1, self.radial_power.__len__()))
tbin, nr = None, None
Lx, Ly = self.dem_pad.shape
xc = np.int(Lx / 2)
yc = np.int(Ly / 2)
dfx = 1 / (self.dx * Lx)
dfy = 1 / (self.dy * Ly)
self.radial_freq = np.linspace(0, np.sqrt((dfy * (Lx - yc)) ** 2 + (dfx * (Ly - xc)) ** 2), self.radial_power.__len__())
if ret is True:
return self.radial_power, self.radial_freq
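# Hedged sketch (not part of the original module): normalized_spect() stores the
# misfit between the DEM spectrum and the synthetic spectra in self.synth_rmse,
# so the roughness parameter H can be chosen by a simple grid scan. The candidate
# H values and the small nSynth below are assumptions made to keep the scan cheap.
def scan_roughness(decomp, h_values=(0.1, 0.3, 0.5, 0.7, 0.9), nSynth=5):
    '''
    Return the H value whose synthetic spectra best fit the DEM periodogram.
    Assumes decomp.fftdem() has already been run.
    '''
    best_h, best_rmse = None, np.inf
    for h in h_values:
        decomp.normalized_spect(H=h, nSynth=nSynth, demVar=decomp.Power_vec.sum())
        if decomp.synth_rmse < best_rmse:
            best_h, best_rmse = h, decomp.synth_rmse
    return best_h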
if __name__ == '__main__':
# script to demonstrate how to use fftDecomposition() based on a random surface
t = ds.diamondSquare(100, 50, 20, .3)
p = fftDecomposition()
p.dem = t
p.dx = 1
p.dy = 1
p.smoothing(kernel_size=20)
p.fftdem()
    # To generate synthetic spectrum, use H that minimizes the RMSE (could be done incrementally)
p.normalized_spect(H=0.3, demVar=p.Power_vec.sum())
    # To look at the confidence level for the normalized periodogram, use a chi-square distribution
    plt.imshow(p.DFTperiodogram_norm > chi2.ppf(.90, 2))
p.plot_1D_specDEM(30)
p.plot_1D_specNORM(nbins=30)
p.azimuthalAverage()
plt.figure()
plt.semilogy(p.radial_freq, p.radial_power)
plt.xlabel('Frequency [1/m]')
plt.ylabel('Power Spectrum')
plt.figure()
plt.imshow(p.dem)
| mit |
kagayakidan/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # avoid division by zero at exactly 0.5
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
SusanJL/iris | lib/iris/tests/experimental/test_animate.py | 11 | 2873 | # (C) British Crown Copyright 2013 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test the animation of cubes within iris.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import numpy as np
import iris
from iris.coord_systems import GeogCS
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import iris.experimental.animate as animate
import iris.plot as iplt
@tests.skip_plot
class IntegrationTest(tests.GraphicsTest):
def setUp(self):
cube = iris.cube.Cube(np.arange(36, dtype=np.int32).reshape((3, 3, 4)))
cs = GeogCS(6371229)
coord = iris.coords.DimCoord(
points=np.array([1, 2, 3], dtype=np.int32), long_name='time')
cube.add_dim_coord(coord, 0)
coord = iris.coords.DimCoord(
points=np.array([-1, 0, 1], dtype=np.int32),
standard_name='latitude',
units='degrees',
coord_system=cs)
cube.add_dim_coord(coord, 1)
coord = iris.coords.DimCoord(
points=np.array([-1, 0, 1, 2], dtype=np.int32),
standard_name='longitude',
units='degrees',
coord_system=cs)
cube.add_dim_coord(coord, 2)
self.cube = cube
def test_cube_animation(self):
# This follows :meth:`~matplotlib.animation.FuncAnimation.save`
# to ensure that each frame corresponds to known accepted frames for
# the animation.
cube_iter = self.cube.slices(('latitude', 'longitude'))
ani = animate.animate(cube_iter, iplt.contourf)
# Disconnect the first draw callback to stop the animation
ani._fig.canvas.mpl_disconnect(ani._first_draw_id)
ani = [ani]
# Extract frame data
for data in zip(*[a.new_saved_frame_seq() for a in ani]):
# Draw each frame
for anim, d in zip(ani, data):
anim._draw_next_frame(d, blit=False)
self.check_graphic()
if __name__ == "__main__":
tests.main()
| gpl-3.0 |
andrewcbennett/iris | docs/iris/example_code/General/lineplot_with_legend.py | 18 | 1131 | """
Multi-line temperature profile plot
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""
import matplotlib.pyplot as plt
import iris
import iris.plot as iplt
import iris.quickplot as qplt
def main():
fname = iris.sample_data_path('air_temp.pp')
# Load exactly one cube from the given file.
temperature = iris.load_cube(fname)
# We only want a small number of latitudes, so filter some out
# using "extract".
temperature = temperature.extract(
iris.Constraint(latitude=lambda cell: 68 <= cell < 78))
for cube in temperature.slices('longitude'):
# Create a string label to identify this cube (i.e. latitude: value).
cube_label = 'latitude: %s' % cube.coord('latitude').points[0]
# Plot the cube, and associate it with a label.
qplt.plot(cube, label=cube_label)
# Add the legend with 2 columns.
plt.legend(ncol=2)
# Put a grid on the plot.
plt.grid(True)
# Tell matplotlib not to extend the plot axes range to nicely
# rounded numbers.
plt.axis('tight')
# Finally, show it.
iplt.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
dsullivan7/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
bonettor/PubNative_Challenge | data_analyzer.py | 1 | 4814 | # AUTHOR RICCARDO BONETTO
import numpy as np
import pandas as pd
import math
import os.path
import matplotlib.pyplot as plt
from sklearn import preprocessing as pre
from sklearn.model_selection import StratifiedKFold
from sklearn.decomposition import PCA, KernelPCA
_n_components = 2
def read_train_data(data_path = './dataset/training.csv'):
return pd.read_csv(data_path, delimiter = ';', thousands = ',')
def read_test_data(data_path = './dataset/validation.csv'):
return pd.read_csv(data_path, delimiter = ';', thousands = ',')
def clean_data(dataset, targets,
data_label_encoder = None,
target_label_encoder = None,
scaler = None):
# COLUMN 18 HAS TOO MANY MISSING POINTS,
# I DROP IT.
dataset = dataset.drop('v18',1)
if target_label_encoder is None:
target_label_encoder = pre.LabelEncoder()
if targets is not None:
print('targets before fit', targets)
target_label_encoder.fit(targets)
print('targets after fit', targets)
if data_label_encoder is None:
data_label_encoder = {}
num_columns_keys = ['v2','v3','v8','v11','v14','v15','v17','v19']
tmp_df = dataset[num_columns_keys]
for col in tmp_df:
tmp_df.loc[:,col] = pd.to_numeric(tmp_df[col], errors='coerce')
tmp_df = pre.Imputer(axis=1).fit_transform(tmp_df)
if scaler is None:
scaler = pre.StandardScaler().fit(tmp_df)
tmp_df = scaler.transform(tmp_df)
dataset[num_columns_keys] = tmp_df[:]
cat_columns_keys = ['v1','v4','v5','v6','v7','v9','v10','v12',
'v13']
tmp_df = dataset[cat_columns_keys]
for col in tmp_df:
dataset[col] = dataset[col].factorize()[0]
if col not in data_label_encoder:
data_label_encoder[col] = pre.LabelEncoder().fit(dataset[col])
dataset[col] = data_label_encoder[col].transform(dataset[col])
if targets is not None:
targets = target_label_encoder.transform(targets)
return dataset, targets, data_label_encoder, target_label_encoder, scaler
def perform_pca(data, n_components, pca_object = None):
if pca_object == None:
#pca_object = KernelPCA(n_components = n_components, kernel='poly', degree = 3).fit(data)
pca_object = KernelPCA(n_components = n_components, kernel='poly', degree = 3).fit(data)
return pca_object.transform(data), pca_object
def prepare_data():
train_set = read_train_data()
test_set = read_test_data()
train_Y = np.array(train_set['classLabel'])
train_X = train_set.drop('classLabel', axis = 1)
test_Y = np.array(test_set['classLabel'])
test_X = test_set.drop('classLabel', axis = 1)
train_data, train_targets, data_label_encoder, target_label_encoder, scaler = clean_data(train_X, train_Y)
train_data = np.array(train_data.values)
test_data, test_targets, _, _, _ = clean_data(test_X, test_Y, data_label_encoder, target_label_encoder, scaler)
test_data = np.array(test_data.values)
print('DATA CLEANING AND SCALING OK')
return train_data, train_targets, test_data, test_targets
def balance_data(labels, true_values, false_values):
cut_value = 0
if true_values>false_values:
balancer = 1
cut_value = true_values - false_values
elif true_values<false_values:
balancer = 0
cut_value = false_values - true_values
else:
return None
index_list = []
for i in range(len(labels)):
if labels[i] == balancer:
index_list.append(i)
random_indices = np.random.permutation(
len(index_list))
    # shuffle the majority-class indices themselves, not just their positions
    index_list = [index_list[i] for i in random_indices]
return index_list[:cut_value]
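# Hedged usage sketch (not part of the original script): balance_data() returns
# the row indices of surplus majority-class samples, so a caller would typically
# drop those rows from both the features and the labels before training.
def apply_balancing(data, labels):
    true_values = int(np.sum(labels == 1))
    false_values = len(labels) - true_values
    drop_idx = balance_data(labels, true_values, false_values)
    if drop_idx is None:
        # classes are already balanced
        return data, labels
    return np.delete(data, drop_idx, axis=0), np.delete(labels, drop_idx)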
if __name__ == '__main__':
train_data, train_targets, test_data, test_targets = prepare_data()
train_true_values = len([x for x in range(len(train_targets)) if train_targets[x]==1])
train_false_values = len(train_targets) - train_true_values
test_true_values = len([x for x in range(len(test_targets)) if test_targets[x]==1])
test_false_values = len(test_targets) - test_true_values
print('total training elements:', train_true_values+train_false_values)
print('total true elements:', train_true_values)
print('total false elements:', train_false_values)
print('total test elements:', test_true_values+test_false_values)
print('total true elements:', test_true_values)
print('total false elements:', test_false_values)
#_n_components = train_data.shape[1]
train_data, pca_object = perform_pca(train_data, _n_components)
test_data, _ = perform_pca(test_data, _n_components, pca_object)
positive_indices = [x for x in range(len(train_targets)) if train_targets[x]==1]
positive_trains = train_data[positive_indices]
print(positive_trains)
#fig = plt.figure()
    #no_plot = plt.scatter(train_data[:,0],train_data[:,1], color='r', label='NO')
#yes_plot = plt.scatter(positive_trains[:,0],positive_trains[:,1], color='b', label='YES')
#plt.legend(handles=[no_plot, yes_plot])
#fig.savefig('./ploy_3_pca.jpg')
hist = np.histogram(train_targets, bins=2)
plt.hist(train_targets,bins=2)
plt.show()
| mit |
deanjohnr/macrotrendfollow | build_factors.py | 1 | 15033 | ### build_factors.py ###
import pandas as pd
import numpy as np
import time
import datetime
from lxml import html
import requests
import json
# Scrapes Google Finance for price data
def get_prices(tickers, startdate, enddate):
#TODO: Add startdate and enddate validation
df = pd.DataFrame() #Initialize final data frame
for ticker in tickers:
df_ticker = pd.DataFrame() #Initialize ticker data frame
checkdate = enddate
lastcheckdate = ''
print('Retrieving data for ' + ticker + '...')
while (checkdate != startdate) and (checkdate != lastcheckdate):
# Build URL
firstspace = startdate.index(' ')
secondspace = startdate.index(' ',firstspace+1,-1)
startdateurl = startdate[0:firstspace]+'+'+startdate[firstspace+1:secondspace]+'%2C+'+startdate[secondspace+1:]
firstspace = checkdate.index(' ')
secondspace = checkdate.index(' ',firstspace+1,-1)
enddateurl = checkdate[0:firstspace]+'+'+checkdate[firstspace+1:secondspace]+'%2C+'+checkdate[secondspace+1:]
url = """https://www.google.com/finance/historical?q="""+ticker+"""&startdate="""+startdateurl+"""&enddate="""+enddateurl+"""&num=200"""
# Parse Page
page = requests.get(url)
tree = html.fromstring(page.content)
prices = [td.text_content()[1:len(td.text_content())-1].replace(',','').split('\n') for td in tree.xpath('//div[@id="prices"]/table[@class="gf-table historical_price"]/tr')]
try:
tmpdf = pd.DataFrame(prices[1:], columns=prices[0])
except:
print(prices)
raise
lastcheckdate = checkdate
checkdate = tmpdf.tail(1)['Date'].values[0]
if checkdate != lastcheckdate:
df_ticker = df_ticker.append(tmpdf).drop_duplicates()
df_ticker['asset'] = ticker
df_ticker['date'] = pd.to_datetime(df_ticker['Date'])
        df_ticker = df_ticker.drop('Date', axis=1).set_index(keys=['date','asset'])
df = df.append(df_ticker)
print('Done retrieving data')
return df
# Function to clean columns based on percentage of NaN values
def clean_columns(df, pctnan=0.2):
df = df.select_dtypes(exclude=['object'])
df = df.dropna(thresh=len(df)*pctnan, axis=1)
return df
# Function to clean outliers
def clean_outliers(df, window=3, threshold=3):
    df_fill = df.rolling(window=window, center=False).median().fillna(method='bfill').fillna(method='ffill')
df_diff = (df-df_fill).abs()
mask = df_diff/df_diff.std() > threshold
df[mask] = df_fill
return df
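# Hedged worked example (not part of the original pipeline): the two helpers
# above drop sparse columns and damp rolling-median outliers. The toy frame and
# the loose threshold below are assumptions used purely for illustration.
def _cleaning_demo():
    toy = pd.DataFrame({'price': [10.0, 10.2, 250.0, 10.4, 10.5, 10.6],
                        'sparse': [np.nan, np.nan, np.nan, np.nan, np.nan, 1.0]})
    toy = clean_columns(toy, pctnan=0.5)               # 'sparse' column is dropped
    toy = clean_outliers(toy, window=3, threshold=2)   # 250.0 is pulled back to the local median
    return toy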
### INITIALIZE CONFIGURATION ###
feature_columns = None
price_columns = None
momentum_columns = None
moving_average_columns = None
cftc_ignore = []
date_column = None
ticker_column = None
# Test Data Structure Default Variables
forward_periods = [10,20] # array of ints
quantiles = 5 # >= 2
train_end = pd.to_datetime('01/01/2016') # Set train and test split date
### LOAD CONFIGURATION ###
# Load Configuration File #
try:
with open('config.json') as config_file:
config = json.load(config_file)
except:
print('Error loading config.json file')
raise
# Assign Configuration Variables #
# Target field, normally price
try:
target = str(config['data']['google']['target'])
except:
print('Error configuring algorithm target')
raise
# CFTC Data
try:
cftc_ticker = str(config['data']['cftc']['ticker'])
except:
print('Error configuring CFTC ticker column')
raise
try:
cftc_date = str(config['data']['cftc']['date'])
except:
print('Error configuring CFTC date columns')
raise
try:
res = config['data']['cftc']['ignore']
if res is not None:
cftc_ignore = res
except:
cftc_ignore = []
# Tickers and CFTC mapping
try:
tickers = config['data']['tickers']
except:
print('Error configuring tickers')
raise
# Feature Configuration
# Momentum
try:
momentum_columns = config['features']['momentum']['columns']
except:
pass
try:
momentum_period = config['features']['momentum']['period']
except:
pass
# Moving Average
try:
moving_average_columns = config['features']['moving_average']['columns']
except:
pass
try:
moving_average_periods = config['features']['moving_average']['periods']
except:
pass
# Google finance
try:
price_columns = config['data']['google']['columns']
except:
price_columns = [target]
# Measurement Period
try:
start_date = config['measurement']['start_date']
except:
print('Warning: using default start date Dec 1 2005')
start_date = 'Dec 1 2005'
try:
end_date = config['measurement']['end_date']
except:
print('Warning: using default end date Jan 1 2017')
end_date = 'Jan 1 2017'
# Forward periods for prediction
try:
forward_periods = config['measurement']['forward_periods']
except:
pass
# Factor quantiles
try:
quantiles = config['measurement']['quantiles']
except:
pass
# Train test split date
try:
train_end = pd.to_datetime(config['measurement']['train_end'])
except:
print('Warning: using default train test split data Jan 1 2016')
pass
# Utility Rolling Binning Function
binrank = lambda x: pd.cut(x,bins=quantiles,labels=(np.array(range(quantiles))+1))[-1]
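# Hedged illustration (not part of the original pipeline): through pandas'
# rolling().apply(), binrank labels the most recent observation with its
# equal-width bin (1..quantiles) computed over the trailing window. The toy
# series below is an assumption for illustration only.
def _binrank_demo():
    s = pd.Series([1.0, 2.0, 3.0, 10.0, 2.5, 9.0])
    return s.rolling(window=3).apply(binrank)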
### LOAD AND PROCESS CFTC DATA ###
# Read in CFTC Data
#TODO: UPGRADE TO WEB SCRAPE
df_cftc = pd.read_csv('data/C_TFF_2006_2016.txt')
# Filter to relevent tickers
df_cftc = df_cftc[df_cftc[cftc_ticker].isin(tickers.values())].drop(cftc_ignore, axis=1)
# Standardize Index Columns
df_cftc['date'] = pd.to_datetime(df_cftc[cftc_date])
df_cftc['asset'] = df_cftc[cftc_ticker].replace(dict((v,k) for k,v in tickers.iteritems())) #flip keys and values for mapping
df_cftc = df_cftc.drop([cftc_ticker,cftc_date], axis=1)
### SCRAPE AND PROCESS GOOGLE PRICES ###
# Scrape
try:
df = get_prices(tickers.keys(), start_date, end_date)
except:
print('Warning: Failed to get google prices, falling back to last stored googletickerdata.csv')
df = pd.read_csv('data/googlepricedata.csv')
df = df.set_index(keys=['date', 'asset'])
else:
df.to_csv('data/googlepricedata.csv')
# Standardize Index
df = df.reset_index()
df['date'] = pd.to_datetime(df['date'])
### EXECUTE BY ASSET ###
# Initialize data frames
df_test = pd.DataFrame()
df_factor = pd.DataFrame()
for ticker in tickers.keys():
print('Processing '+ticker+'...')
## Clean outliers ##
# google finance data cleaning
df_prices = df.loc[df['asset'] == ticker]
df_prices = df_prices.set_index(keys=['date', 'asset']).sort_index()
df_prices = df_prices.apply(lambda x: pd.to_numeric(x, errors='coerce'), axis=0)
df_prices = clean_columns(df_prices, pctnan=0.2)
df_prices = clean_outliers(df_prices, window=3, threshold=3)
# cftc data cleaning
df_pos = df_cftc.loc[df_cftc['asset'] == ticker]
df_pos = df_pos.set_index(keys=['date', 'asset']).sort_index()
df_pos = df_pos.apply(lambda x: pd.to_numeric(x, errors='coerce'), axis=0)
df_pos = clean_columns(df_pos, pctnan=0.2)
df_pos = clean_outliers(df_pos, window=3, threshold=3)
# Merge Google and CFTC
df_data = df_prices.merge(right=df_pos, how='left', left_index=True, right_index=True)
# Forward Fill
df_data = df_data.fillna(method='ffill')
data_columns = np.array(df_data.columns.values)
if feature_columns is None:
feature_columns_tmp = data_columns
else:
feature_columns_tmp = feature_columns
if price_columns is None:
price_columns_tmp = df_prices.columns.values
else:
        price_columns_tmp = data_columns[np.in1d(data_columns,price_columns)]
if momentum_columns is None:
momentum_columns_tmp = data_columns
else:
momentum_columns_tmp = data_columns[np.in1d(data_columns,momentum_columns)]
if moving_average_columns is None:
moving_average_columns_tmp = data_columns
else:
moving_average_columns_tmp = data_columns[np.in1d(data_columns,moving_average_columns)]
# Create Long/Short Net Exposure
long_array = np.array([])
short_array = np.array([])
spread_array = np.array([])
net_array = np.array([])
long_short_diff_array = np.array([])
short_ratio_array = np.array([])
for j in [i for i, s in enumerate(data_columns) if '_Long_All' in s]:
long_name = data_columns[j]
name_component = long_name[:long_name.index('_Long_All')]
short_name = name_component+'_Short_All'
long_short_diff_name = name_component+'_Long_Short_Diff'
short_ratio_name = name_component+'_Short_Ratio'
net_name = name_component+'_Net'
spread_name = name_component+'_Spread_All'
if (short_name in data_columns) and (spread_name in data_columns):
long_array = np.append(long_array,long_name)
short_array = np.append(short_array,short_name)
long_short_diff_array = np.append(long_short_diff_array,long_short_diff_name)
spread_array = np.append(spread_array,spread_name)
net_array = np.append(net_array,net_name)
short_ratio_array = np.append(short_ratio_array,short_ratio_name)
# Create Net Features
df_data[net_array] = df_data[long_array] - df_data[short_array].values + df_data[spread_array].values
df_data[long_short_diff_array] = df_data[long_array] - df_data[short_array].values
df_data[short_ratio_array] = df_data[short_array]/(df_data[short_array].values + df_data[long_array].values + df_data[spread_array].values)
# Add Net Features to Moving Average and Momentum Lists
momentum_columns_tmp = np.append(momentum_columns_tmp, [net_array, long_short_diff_array, short_ratio_array])
moving_average_columns_tmp = np.append(moving_average_columns_tmp, [net_array, long_short_diff_array, short_ratio_array])
df_data = df_data.reset_index()
# Momentum
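    # diff(n)/shift(n) below is the n-period percentage change of each
    # momentum column, with n = momentum_period.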
mocolnames = ['mo_'+col+'_'+str(momentum_period) for col in momentum_columns_tmp]
df_data[mocolnames] = df_data[momentum_columns_tmp].diff(momentum_period)/df_data[momentum_columns_tmp].shift(momentum_period)
# Momentum 2nd Degree
momocolnames = ['momo_'+col+'_'+str(momentum_period) for col in momentum_columns_tmp]
df_data[momocolnames] = df_data[mocolnames].diff(momentum_period)
# Build Moving Average Features
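    # For each column: simple rolling means for every period, the ratio of each
    # price moving average to the target, and ratios of shorter to longer
    # moving averages (crossover-style features).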
for col in moving_average_columns_tmp:
for moving_average_period in moving_average_periods:
df_data['ma_'+col+'_'+str(moving_average_period)] = df_data[col].rolling(window=moving_average_period, center=False).mean()
if col in price_columns_tmp:
df_data['ma_'+col+'_target/'+str(moving_average_period)] = df_data['ma_'+col+'_'+str(moving_average_period)]/df_data[target]
for moving_average_period_num in moving_average_periods:
for moving_average_period_denom in moving_average_periods:
if moving_average_period_num < moving_average_period_denom:
df_data['ma_'+col+'_'+str(moving_average_period_num)+'/'+str(moving_average_period_denom)] = df_data['ma_'+col+'_'+str(moving_average_period_num)]/df_data['ma_'+col+'_'+str(moving_average_period_denom)]
#TODO: Add Feature Groups and Positioning Percentages
### Measure Feature Quantile Returns ###
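    # For each derived feature: compute forward returns over forward_periods,
    # a rolling z-score and bucket rank of the feature, then summarise mean
    # return, standard error and sample count per bucket on the training window.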
for feature in df_data.drop(['date','asset',target], axis=1):
print('Analyzing '+ticker+'_'+feature)
# Clean Feature
df_feature = df_data[['date','asset',target,feature]].fillna(method='ffill').dropna().set_index(keys=['date','asset'])
# Ensure Feature has enough data to support binning (may not need this anymore)
feature_mean = df_feature[feature].mean()
feature_stdev = df_feature[feature].std()
if (feature_stdev>0) and (~np.isnan(feature_mean)) and (~np.isinf(feature_mean)) and (~np.isnan(feature_stdev)) and (~np.isinf(feature_stdev)):
# Build Factor and Factor Bins
#factor_data = df_feature[[feature,target]]
# Create Forward Returns
for period in forward_periods:
df_feature[period] = df_feature[target].pct_change(period).shift(-period)
# Compute Factor Z-Score
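            # Rolling z-score over a 400-observation window (min 100 obs):
            # (x - rolling mean) / rolling std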
df_feature.rename(columns={feature: 'factor'}, inplace=True)
df_feature['factor_zscore'] = (df_feature['factor']-df_feature['factor'].rolling(window=400, min_periods=100).mean())/df_feature['factor'].rolling(window=400, min_periods=100).std()
# Bin Factor Z-Score (may not need this try, exception)
try:
df_feature['factor_bucket'] = df_feature['factor'].rolling(window=400, min_periods=100).apply(binrank)
            except Exception as e:
                print('Could not bin factor for '+ticker+'_'+feature+': '+str(e))
continue
else:
#df_feature['factor_bucket'] = df_feature['factor'].rolling(window=400, min_periods=100).apply(binrank)
df_feature = df_feature.dropna().reset_index()
train_data = df_feature[df_feature['date'] < train_end].set_index(keys=['date','asset'])
test_data = df_feature[df_feature['date'] >= train_end]
# Compute Mean, Std Error, and Sample Sizes
mean_return = train_data.groupby('factor_bucket')[forward_periods].mean()
std_err = train_data.groupby('factor_bucket')[forward_periods].std()
count_sample = train_data.groupby('factor_bucket')[forward_periods].count()
# Join results
df_factor_tmp = (mean_return/std_err).merge(right=mean_return, left_index=True, right_index=True, suffixes=['_zscore','_mean'])
df_factor_tmp = df_factor_tmp.merge(right=std_err, left_index=True, right_index=True, suffixes=['','_std_err'])
df_factor_tmp = df_factor_tmp.merge(right=count_sample, left_index=True, right_index=True, suffixes=['','_count'])
# Format results for storage
df_factor_tmp['feature'] = feature
df_factor_tmp['asset'] = ticker
df_factor_tmp = df_factor_tmp.reset_index().set_index(keys=['asset','feature','factor_bucket'])
df_factor = df_factor.append(df_factor_tmp)
# Format Test Data
#test_data = factor_data[factor_data['date'] >= train_end]
test_data['feature'] = feature
test_data = test_data.set_index(keys=['date','asset','feature'])
df_test = df_test.append(test_data)
# Save results periodically to avoid a full rerun on error
df_factor.to_csv('results/'+ticker+'_factors.csv')
# Save test data
df_test.to_csv('results/test_data.csv')
| mit |
maheshakya/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
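# fit_inverse_transform=True learns the pre-image mapping, which is what
# allows kpca.inverse_transform to reconstruct X_back below.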
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
pratapvardhan/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 25 | 13704 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset and which features a point in time split between the train and
test sets. The compressed dataset size is around 14 MB. Once
uncompressed the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
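A minimal usage sketch (illustrative only; the two category names below are
just examples of valid newsgroup labels)::
    from sklearn.datasets import fetch_20newsgroups
    bunch = fetch_20newsgroups(subset='train',
                               categories=['sci.space', 'rec.autos'],
                               remove=('headers', 'footers', 'quotes'))
    print(len(bunch.data), bunch.target_names)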
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from .base import _pkl_filepath
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
    (for example, because they contain the string 'writes:').
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = _pkl_filepath(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
print('Downloading 20news dataset. This may take a few minutes.')
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
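    A minimal sketch of that combination (one possible choice of vectorizer;
    the parameter values are illustrative only)::
        from sklearn.datasets import fetch_20newsgroups
        from sklearn.feature_extraction.text import TfidfVectorizer
        bunch = fetch_20newsgroups(subset='train')
        X = TfidfVectorizer(stop_words='english',
                            ngram_range=(1, 2)).fit_transform(bunch.data)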
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
        Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = _pkl_filepath(data_home, filebase + ".pkl")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
| bsd-3-clause |
ishay2b/tensorflow | tensorflow/python/estimator/inputs/pandas_io_test.py | 89 | 8340 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def callInputFnOnce(self, input_fn, session):
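    # Builds the input_fn graph, runs it once under queue runners, and returns
    # the evaluated (features, target) values.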
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_NonBoolShuffle(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaisesRegexp(TypeError,
'shuffle must be explicitly set as boolean'):
# Default shuffle is None
pandas_io.pandas_input_fn(x, y_noindex)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.test_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
test.main()
| apache-2.0 |
aborovin/trading-with-python | cookbook/reconstructVXX/downloadVixFutures.py | 77 | 3012 | #-------------------------------------------------------------------------------
# Name: download CBOE futures
# Purpose: get VIX futures data from CBOE, process data to a single file
#
#
# Created: 15-10-2011
# Copyright: (c) Jev Kuznetsov 2011
# Licence: BSD
#-------------------------------------------------------------------------------
#!/usr/bin/env python
from urllib import urlretrieve
import os
from pandas import *
import datetime
import numpy as np
m_codes = ['F','G','H','J','K','M','N','Q','U','V','X','Z'] #month codes of the futures
codes = dict(zip(m_codes,range(1,len(m_codes)+1)))
#dataDir = os.path.dirname(__file__)+'/data'
dataDir = os.path.expanduser('~')+'/twpData/vixFutures'
print 'Data directory: ', dataDir
def saveVixFutureData(year,month, path, forceDownload=False):
''' Get future from CBOE and save to file '''
fName = "CFE_{0}{1}_VX.csv".format(m_codes[month],str(year)[-2:])
    if os.path.exists(path+'\\'+fName) and not forceDownload:
print 'File already downloaded, skipping'
return
urlStr = "http://cfe.cboe.com/Publish/ScheduledTask/MktData/datahouse/{0}".format(fName)
print 'Getting: %s' % urlStr
try:
urlretrieve(urlStr,path+'\\'+fName)
except Exception as e:
print e
def buildDataTable(dataDir):
""" create single data sheet """
files = os.listdir(dataDir)
data = {}
for fName in files:
print 'Processing: ', fName
try:
df = DataFrame.from_csv(dataDir+'/'+fName)
code = fName.split('.')[0].split('_')[1]
month = '%02d' % codes[code[0]]
year = '20'+code[1:]
newCode = year+'_'+month
data[newCode] = df
except Exception as e:
print 'Could not process:', e
full = DataFrame()
for k,df in data.iteritems():
s = df['Settle']
s.name = k
s[s<5] = np.nan
if len(s.dropna())>0:
full = full.join(s,how='outer')
else:
print s.name, ': Empty dataset.'
full[full<5]=np.nan
full = full[sorted(full.columns)]
# use only data after this date
startDate = datetime.datetime(2008,1,1)
idx = full.index >= startDate
full = full.ix[idx,:]
#full.plot(ax=gca())
fName = os.path.expanduser('~')+'/twpData/vix_futures.csv'
print 'Saving to ', fName
full.to_csv(fName)
if __name__ == '__main__':
if not os.path.exists(dataDir):
print 'creating data directory %s' % dataDir
os.makedirs(dataDir)
for year in range(2008,2013):
for month in range(12):
print 'Getting data for {0}/{1}'.format(year,month+1)
saveVixFutureData(year,month,dataDir)
    print 'Raw data was saved to {0}'.format(dataDir)
buildDataTable(dataDir) | bsd-3-clause |
Titan-C/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 11 | 7453 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/sites/default/files/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
In a second step, we set alpha and compare the performance of different
feature selection methods, using the area under the curve (AUC) of the
precision-recall curve.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.exceptions import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
linalg.pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and enables to
# see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Stop the user warnings outputs- they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a paths going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
luo66/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
    gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
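        # johnson_lindenstrauss_min_dim gives the smallest embedding dimension
        # that preserves pairwise distances within the given eps (JL lemma).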
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(X,
transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
flightgong/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 3 | 31580 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
##
## Test Data
##
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
##
## Classification Test Case
##
class CommonTest(object):
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
#... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
"""Input format tests. """
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
"""Test whether clone works ok. """
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory = SGDClassifier
def test_sgd(self):
"""Check that SGD gives any results :-)"""
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
#assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
"""Check whether expected ValueError on bad l1_ratio"""
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
"""Check whether expected ValueError on bad learning_rate"""
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
"""Check whether expected ValueError on bad eta0"""
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
"""Check whether expected ValueError on bad alpha"""
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
"""Check whether expected ValueError on bad penalty"""
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
"""Check whether expected ValueError on bad loss"""
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
"""Test parameter validity check"""
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
"""Test parameter validity check"""
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
"""Checks coef_init not allowed as model argument (only fit)"""
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
"""Checks coef_init shape for the warm starts"""
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
"""Checks intercept_ shape for the warm starts"""
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
"""Checks intercept_ shape for the warm starts in binary case"""
self.factory().fit(X5, Y5, intercept_init=0)
def test_set_intercept_to_intercept(self):
"""Checks intercept_ shape consistency for the warm starts"""
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
"""Target must have at least two labels"""
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_sgd_multiclass(self):
"""Multi-class test case"""
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_with_init_coef(self):
"""Multi-class test case"""
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
        assert_equal(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
"""Multi-class test case with multi-core support"""
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
"""Checks coef_init and intercept_init shape for for multi-class
problems"""
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
"""Check SGD.predict_proba"""
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
"""Test L1 regularization"""
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
"""
Test class weights.
"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
"""Test if equal class weights approx. equals no class weights. """
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
"""ValueError due to not existing class label."""
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
"""ValueError due to wrong class_weight argument type."""
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_auto_weight(self):
"""Test class weights for imbalanced data"""
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X)), 0.96,
decimal=1)
# make the same prediction using automated class_weight
clf_auto = self.factory(alpha=0.0001, n_iter=1000,
class_weight="auto").fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_auto.predict(X)), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "auto"
assert_array_almost_equal(clf.coef_, clf_auto.coef_, 6)
        # build a very very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred), 0.96)
# fit a model with auto class_weight enabled
clf = self.factory(n_iter=1000, class_weight="auto")
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="auto")
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred), 0.96)
def test_sample_weights(self):
"""Test weights on individual samples"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
"""Test if ValueError is raised if sample_weight has wrong shape"""
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def test_fit_then_partial_fit(self):
"""Partial_fit should work after initial fit in the multiclass case.
Non-regression test for #2496; fit would previously produce a
Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
"""
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
"""Test multiple calls of fit w/ different shaped inputs."""
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory = SGDRegressor
def test_sgd(self):
"""Check that SGD gives any results."""
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
"""Check whether expected ValueError on bad penalty"""
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
"""Check whether expected ValueError on bad loss"""
self.factory(loss="foobar")
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
"""Check that the SGD output is consistent with coordinate descent"""
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
# ground_truth linear model that generate y from X and to which the
# models should converge if the regularizer would be set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory = SparseSGDRegressor
def test_l1_ratio():
"""Test if l1 ratio extremes match L1 and L2 penalty settings. """
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1').fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2').fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
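# Illustrative sketch (not part of the original test suite): the ElasticNet
# penalty minimized by SGDClassifier is, up to its exact internal scaling,
# alpha * (l1_ratio * ||w||_1 + 0.5 * (1 - l1_ratio) * ||w||_2^2), which is
# why l1_ratio -> 1 recovers pure L1 and l1_ratio -> 0 recovers pure L2 above.
# The helper name below is hypothetical.
def _elastic_net_penalty_sketch(w, alpha, l1_ratio):
    """Return the assumed ElasticNet penalty value for coefficients w."""
    w = np.asarray(w, dtype=float)
    return alpha * (l1_ratio * np.abs(w).sum()
                    + 0.5 * (1. - l1_ratio) * np.dot(w, w))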
def test_underflow_or_overlow():
# Generate some weird data with unscaled features
rng = np.random.RandomState(42)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, 0] *= 100
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(scale(X), ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(scale(X), y)
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
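# Illustrative sketch (not part of the original test suite): the fix suggested
# by the error message above is to standardize the features before fitting.
# StandardScaler from sklearn.preprocessing is assumed to be available, and
# the helper name is hypothetical.
def _fit_on_scaled_data_sketch(X, y):
    """Fit the same SGD model after scaling, avoiding the overflow."""
    from sklearn.preprocessing import StandardScaler
    X_scaled = StandardScaler().fit_transform(X)
    model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
    return model.fit(X_scaled, y)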
| bsd-3-clause |
andaag/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 129 | 10192 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
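# Illustrative sketch (not part of the original test suite) of the pattern the
# tests above exercise: build a BallTree once, then query the k nearest
# neighbors of new points. The helper name is hypothetical.
def _ball_tree_usage_sketch():
    """Return distances and indices of the 3 nearest neighbors."""
    rng_ = np.random.RandomState(0)
    X_ = rng_.random_sample((40, DIMENSION))
    Y_ = rng_.random_sample((10, DIMENSION))
    tree = BallTree(X_, leaf_size=2)
    dist, ind = tree.query(Y_, k=3)  # both arrays have shape (10, 3)
    return dist, ind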
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
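# Illustrative sketch (not part of the original test suite): the fast
# tree-based estimate that compute_kernel_slow is compared against can be
# obtained directly from a fitted BallTree. The helper name is hypothetical.
def _ball_tree_kde_usage_sketch():
    """Return Gaussian kernel density estimates at 10 query points."""
    rng_ = np.random.RandomState(0)
    X_ = rng_.random_sample((100, 3))
    Y_ = rng_.random_sample((10, 3))
    tree = BallTree(X_, leaf_size=10)
    return tree.kernel_density(Y_, h=0.1, kernel='gaussian')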
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
SimonBussy/C-mix | QNEM/inference.py | 1 | 27790 | # -*- coding: utf-8 -*-
# Author: Simon Bussy <[email protected]>
import numpy as np
from datetime import datetime
from QNEM.history import History
from time import time
from scipy.optimize import fmin_l_bfgs_b
from lifelines.utils import concordance_index as c_index_score
class Learner:
"""The base class for a Solver.
Not intended for end-users, but for development only.
    It should be scikit-learn compliant
Parameters
----------
verbose : `bool`, default=True
        If `True`, print progress information; otherwise the solver
        prints nothing (but records information in history anyway)
print_every : `int`, default=10
Print history information when ``n_iter`` (iteration number) is
a multiple of ``print_every``
"""
def __init__(self, verbose=True, print_every=10):
self.verbose = verbose
self.print_every = print_every
self.history = History()
def _init_coeffs(self, n_features):
self.coeffs = np.empty(n_features)
self.n_features = n_features
def _start_solve(self):
# Reset history
self.history.clear()
self.time_start = Learner._get_now()
self._numeric_time_start = time()
if self.verbose:
print("Launching the solver " + self.__class__.__name__ + "...")
def _end_solve(self):
self.time_end = self._get_now()
t = time()
self.time_elapsed = t - self._numeric_time_start
if self.verbose:
print("Done solving using " + self.__class__.__name__ + " in "
+ "%.2e seconds" % self.time_elapsed)
@staticmethod
def _get_now():
return str(datetime.now()).replace(" ", "_").replace(":", "-")
def get_history(self, key=None):
"""Return history of the solver
Parameters
----------
key : str [optional, default=None]
if None all history is returned as a dict
if str then history of the required key is given
Returns
-------
output : dict or list
if key is None or key is not in history then output is
dict containing history of all keys
if key is not None and key is in history, then output is a list
containing history for the given key
"""
val = self.history.values.get(key, None)
if val is None:
return self.history.values
else:
return val
class MixtureGeoms(Learner):
"""EM Algorithm for fitting a censoring mixture of geometric distributions
with two components
Parameters
----------
tol : `float`, default=1e-5
The tolerance of the solver (iterations stop when the stopping
criterion is below it). By default the solver does ``max_iter``
iterations
max_iter : `int`, default=100
Maximum number of iterations of the solver
verbose : `bool`, default=True
        If `True`, print progress information; otherwise the solver
        prints nothing (but records information in history anyway)
print_every : `int`, default=10
Print history information when ``n_iter`` (iteration number) is
a multiple of ``print_every``
"""
def __init__(self, max_iter=100, verbose=True, print_every=10, tol=1e-5):
Learner.__init__(self, verbose=verbose, print_every=print_every)
self.max_iter = max_iter
self.verbose = verbose
self.print_every = print_every
self.tol = tol
self._init_coeffs(4)
# Attributes that will be instantiated afterwards
self.p0 = None
self.p1 = None
self.pc = None
self.pi = None
@staticmethod
def log_lik(Y, delta, coeffs):
"""Computes the log-likelihood of the censoring mixture of two
geometric distributions
Parameters
----------
Y : `np.ndarray`, shape=(n_samples,)
Times of the event of interest
delta : `np.ndarray`, shape=(n_samples,)
Censoring indicator
coeffs : `np.ndarray`, shape=(4,)
coeffs[0] : Shape parameter of the geometric distribution for the
first component
coeffs[1] : Shape parameter of the geometric distribution for the
second component
coeffs[2] : Shape parameter of the geometric distribution for the
censoring component
coeffs[3] : The mixture parameter
Returns
-------
The value of the log-likelihood
"""
p0, p1, pc, pi = coeffs[0], coeffs[1], coeffs[2], coeffs[3]
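        # For an uncensored time (delta = 1) the contribution is the mixture
        # of the two geometric densities times the censoring survival term;
        # for a censored time (delta = 0) it is the mixture of the two
        # geometric survival functions times the censoring density.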
prb = ((pi * p0 * (1. - p0) ** (Y - 1.)
+ (1. - pi) * p1 * (1. - p1) ** (Y - 1.)
) * (1. - pc) ** Y
) ** delta \
* ((pi * (1 - p0) ** Y
+ (1. - pi) * (1. - p1) ** Y
) * pc * (1. - pc) ** (Y - 1.)
) ** (1. - delta)
return np.mean(np.log(prb))
def fit(self, Y, delta, model='C-mix'):
"""Fit the censoring mixture of geometric distributions
with two components
Parameters
----------
Y : `np.ndarray`, shape=(n_samples,)
Times of the event of interest
delta : `np.ndarray`, shape=(n_samples,)
Censoring indicator
model : 'C-mix', 'CURE', default='C-mix'
The model to be fitted
"""
verbose = self.verbose
max_iter = self.max_iter
print_every = self.print_every
tol = self.tol
self._start_solve()
# Split at random the sample with probability 0.5
n_samples = Y.shape[0]
pi = 0.5
Z = np.random.binomial(1, pi, size=n_samples)
p1 = 1. / np.mean(Y[(delta == 1) + (Z == 1)])
p0 = 1. / np.mean(Y[(delta == 1) + (Z == 0)])
if p0 > p1:
tmp = p0
p0 = p1
p1 = tmp
if model == 'CURE':
p0 = 0
pc = 1. / np.mean(Y[delta == 0])
log_lik = self.log_lik(Y, delta, np.array([p0, p1, pc, pi]))
obj = -log_lik
rel_obj = 1.
self.history.update(n_iter=0, obj=obj, rel_obj=rel_obj)
if verbose:
self.history.print_history()
for n_iter in range(max_iter):
if n_iter % print_every == 0:
self.history.update(n_iter=n_iter, obj=obj,
rel_obj=rel_obj)
if verbose:
self.history.print_history()
# E-Step
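            # a and b are the per-sample likelihood contributions of the p1
            # and p0 geometric components (including their mixture weights
            # 1 - pi and pi); q is the resulting posterior responsibility of
            # the p1 component.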
a = ((1. - p1) ** (Y - 1.) * p1) ** delta * ((1. - p1) ** Y) ** (
1. - delta) * (1. - pi)
b = ((1. - p0) ** (Y - 1.) * p0) ** delta * ((1. - p0) ** Y) ** (
1. - delta) * pi
q = a / (a + b)
# M-Step
if model == 'C-mix':
p0 = ((1. - q) * delta).mean() / ((1. - q) * Y).mean()
p1 = (delta * q).mean() / (q * Y).mean()
pi = 1. - np.mean(q)
prev_obj = obj
log_lik = self.log_lik(Y, delta, np.array([p0, p1, pc, pi]))
obj = -log_lik
rel_obj = abs(obj - prev_obj) / abs(prev_obj)
if (n_iter > max_iter) or (rel_obj < tol):
break
n_iter += 1
self.history.update(n_iter=n_iter, obj=obj, rel_obj=rel_obj)
if verbose:
self.history.print_history()
self._end_solve()
self.p0 = p0
self.p1 = p1
self.pc = pc
self.pi = pi
self.coeffs[:] = np.array([p0, p1, pc, pi])
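# Illustrative sketch (not part of the original module): fitting the
# unsupervised mixture on synthetic censored geometric data. All names and
# parameter values below are hypothetical and chosen for the example only.
def _mixture_geoms_usage_sketch(n_samples=500, seed=0):
    """Fit MixtureGeoms on simulated data and return (p0, p1, pi)."""
    rng = np.random.RandomState(seed)
    Z = rng.binomial(1, 0.4, size=n_samples)           # latent group labels
    T = np.where(Z == 1,
                 rng.geometric(0.3, size=n_samples),   # short survival times
                 rng.geometric(0.05, size=n_samples))  # long survival times
    C = rng.geometric(0.03, size=n_samples)            # censoring times
    Y = np.minimum(T, C)
    delta = (T <= C).astype(float)
    model = MixtureGeoms(max_iter=50, verbose=False)
    model.fit(Y, delta)
    return model.p0, model.p1, model.pi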
class QNEM(Learner):
"""QNEM Algorithm for fitting a censoring mixture of geometric distributions
with two components and elasticNet regularization
Parameters
----------
model : 'C-mix', 'CURE', default='C-mix'
The model to be fitted
fit_intercept : `bool`, default=True
If `True`, include an intercept in the model
l_elastic_net : `float`, default=0
Level of ElasticNet penalization
eta: `float`, default=0.1
The ElasticNet mixing parameter, with 0 <= eta <= 1.
For eta = 0 this is ridge (L2) regularization
For eta = 1 this is lasso (L1) regularization
For 0 < eta < 1, the regularization is a linear combination
of L1 and L2
max_iter : `int`, default=100
Maximum number of iterations of the solver
verbose : `bool`, default=True
        If `True`, print progress information; otherwise the solver
        prints nothing (but records information in history anyway)
print_every : `int`, default=10
Print history information when ``n_iter`` (iteration number) is
a multiple of ``print_every``
tol : `float`, default=1e-5
The tolerance of the solver (iterations stop when the stopping
criterion is below it). By default the solver does ``max_iter``
iterations
warm_start : `bool`, default=False
If true, learning will start from the last reached solution
"""
def __init__(self, model="C-mix", fit_intercept=False, l_elastic_net=0.,
eta=.1, max_iter=100, verbose=True, print_every=1, tol=1e-5,
warm_start=False):
Learner.__init__(self, verbose=verbose, print_every=print_every)
self.l_elastic_net = l_elastic_net
self.eta = eta
self.max_iter = max_iter
self.tol = tol
self.warm_start = warm_start
self.model = model
self.fit_intercept = fit_intercept
# Attributes that will be instantiated afterwards
self.coeffs = None
self.coeffs_ext = None
self.p1 = None
self.p0 = None
self.pc = None
self.pi = None
self.avg_scores = None
self.scores = None
self.l_elastic_net_best = None
self.l_elastic_net_chosen = None
self.grid_elastic_net = None
self.n_features = None
self.n_samples = None
self.adaptative_grid = None
self.grid_size = None
@staticmethod
def logistic_grad(z):
"""Overflow proof computation of 1 / (1 + exp(-z)))
"""
idx_pos = np.where(z >= 0.)
idx_neg = np.where(z < 0.)
res = np.empty(z.shape)
res[idx_pos] = 1. / (1. + np.exp(-z[idx_pos]))
res[idx_neg] = 1 - 1. / (1. + np.exp(z[idx_neg]))
return res
@staticmethod
def logistic_loss(z):
"""Overflow proof computation of log(1 + exp(-z))
"""
idx_pos = np.where(z >= 0.)
idx_neg = np.where(z < 0.)
res = np.empty(z.shape)
res[idx_pos] = np.log(1. + np.exp(-z[idx_pos]))
z_neg = z[idx_neg]
res[idx_neg] = -z_neg + np.log(1. + np.exp(z_neg))
return res
def _func_pen(self, coeffs_ext):
"""Computes the elasticNet penalization of the global objective to be
minimized by the QNEM algorithm
Parameters
----------
        coeffs_ext : `np.ndarray`, shape=(2*n_features,)
            The parameters of the mixture, decomposed into
            positive and negative parts
Returns
-------
output : `float`
The value of the penalization of the global objective
"""
l_elastic_net = self.l_elastic_net
eta = self.eta
n_features = self.n_features
coeffs = coeffs_ext[:n_features] - coeffs_ext[n_features:]
return l_elastic_net * ((1. - eta) * coeffs_ext.sum()
+ 0.5 * eta * np.linalg.norm(coeffs) ** 2)
def _grad_pen(self, coeffs):
"""Computes the gradient of the elasticNet penalization of the global
objective to be minimized by the QNEM algorithm
Parameters
----------
coeffs : `np.ndarray`, shape=(n_features,)
The parameters of the mixture
Returns
-------
        output : `np.ndarray`, shape=(2*n_features,)
            The gradient of the penalization of the global objective
"""
l_elastic_net = self.l_elastic_net
eta = self.eta
n_features = self.n_features
grad = np.zeros(2 * n_features)
# Gradient of lasso penalization
grad += l_elastic_net * (1 - eta)
# Gradient of ridge penalization
grad_pos = (l_elastic_net * eta)
grad[:n_features] += grad_pos * coeffs
grad[n_features:] -= grad_pos * coeffs
return grad
def _log_lik(self, X, Y, delta):
"""Computes the likelihood of the censoring mixture model
Parameters
----------
X : `np.ndarray`, shape=(n_samples, n_features)
The features matrix
Y : `np.ndarray`, shape=(n_samples,)
Times of the event of interest
delta : `np.ndarray`, shape=(n_samples,)
Censoring indicator
Returns
-------
output : `float`
The log-likelihood computed on the given data
"""
pi = self.predict_proba(X, self.fit_intercept, self.coeffs)
p0, p1, pc = self.p0, self.p1, self.pc
prb = ((pi * p0 * (1. - p0) ** (Y - 1.)
+ (1. - pi) * p1 * (1. - p1) ** (Y - 1.)
) * (1. - pc) ** Y
) ** delta \
* ((pi * (1 - p0) ** Y
+ (1. - pi) * (1. - p1) ** Y
) * pc * (1. - pc) ** (Y - 1.)
) ** (1. - delta)
return np.mean(np.log(prb))
def _func_obj(self, X, Y, delta, coeffs_ext):
"""The global objective to be minimized by the QNEM algorithm
(including penalization)
Parameters
----------
X : `np.ndarray`, shape=(n_samples, n_features)
The features matrix
Y : `np.ndarray`, shape=(n_samples,)
Times of the event of interest
delta : `np.ndarray`, shape=(n_samples,)
Censoring indicator
coeffs_ext : `np.ndarray`, shape=(2*n_features,)
            The parameters of the mixture, decomposed into
            positive and negative parts
Returns
-------
output : `float`
The value of the global objective to be minimized
"""
n_features = self.n_features
if self.fit_intercept:
coeffs = coeffs_ext[:n_features + 1] - coeffs_ext[n_features + 1:]
coeffs_ext = np.delete(coeffs_ext, [0, n_features + 1])
else:
coeffs = coeffs_ext[:n_features] - coeffs_ext[n_features:]
self.coeffs = coeffs
log_lik = self._log_lik(X, Y, delta)
pen = self._func_pen(coeffs_ext)
return -log_lik + pen
def _func_sub_obj(self, X, q, coeffs_ext):
"""Computes the sub objective, namely the function to be minimized at
each QNEM iteration using fmin_l_bfgs_b, for the incidence part
of the model. It computes
        mean_i(q_i x_i^T beta + log(1 + exp(-x_i^T beta)))
        + penalization(beta)
Parameters
----------
X : `np.ndarray`, shape=(n_samples, n_features)
The features matrix
q : `np.ndarray`, shape=(n_samples,)
The soft-assignments obtained by the E-step
coeffs_ext : `np.ndarray`, shape=(2*n_features,)
The parameters of the mixture decompose
on positive and negative parts
Returns
-------
output : `float`
The value of the sub objective to be minimized at each QNEM step
"""
n_features = self.n_features
if self.fit_intercept:
coeffs = coeffs_ext[:n_features + 1] - coeffs_ext[n_features + 1:]
coeffs_0 = coeffs[0]
coeffs = coeffs[1:]
coeffs_ext = np.delete(coeffs_ext, [0, n_features + 1])
else:
coeffs_0 = 0
coeffs = coeffs_ext[:n_features] - coeffs_ext[n_features:]
pen = self._func_pen(coeffs_ext)
u = coeffs_0 + X.dot(coeffs)
sub_obj = (q * u + self.logistic_loss(u)).mean()
return sub_obj + pen
def _grad_sub_obj(self, X, q, coeffs_ext):
"""Computes the gradient of the sub objective used in fmin_l_bfgs_b
Parameters
----------
X : `np.ndarray`, shape=(n_samples, n_features)
The features matrix
q : `np.ndarray`, shape=(n_samples,)
The soft-assignments obtained by the E-step
coeffs_ext : `np.ndarray`, shape=(2*n_features,)
            The parameters of the mixture, decomposed into
            positive and negative parts
Returns
-------
        output : `np.ndarray`
            The gradient of the sub objective, as used by fmin_l_bfgs_b at
            each QNEM step
"""
n_features = self.n_features
n_samples = self.n_samples
if self.fit_intercept:
coeffs = coeffs_ext[:n_features + 1] - coeffs_ext[n_features + 1:]
coeffs_0 = coeffs[0]
coeffs = coeffs[1:]
else:
coeffs_0 = 0
coeffs = coeffs_ext[:n_features] - coeffs_ext[n_features:]
grad_pen = self._grad_pen(coeffs)
u = coeffs_0 + X.dot(coeffs)
if self.fit_intercept:
X = np.concatenate((np.ones(n_samples).reshape(1, n_samples).T, X),
axis=1)
grad_pen = np.concatenate([[0], grad_pen[:n_features], [0],
grad_pen[n_features:]])
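            # the intercept entries (first position of each half) are left
            # unpenalized, hence the inserted zeros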
grad = (X * (q - self.logistic_grad(-u)).reshape(n_samples, 1)).mean(
axis=0)
grad_sub_obj = np.concatenate([grad, -grad])
return grad_sub_obj + grad_pen
def fit(self, X, Y, delta):
"""Fit the supervised censoring mixture of geometric distributions.
After the call to the method, trained parameters are saved
in self.p0, self.p1 and self.coeffs
Parameters
----------
X : `np.ndarray`, shape=(n_samples, n_features)
The features matrix
Y : `np.ndarray`, shape=(n_samples,)
Times of the event of interest
delta : `np.ndarray`, shape=(n_samples,)
Censoring indicator
"""
verbose = self.verbose
max_iter = self.max_iter
print_every = self.print_every
tol = self.tol
warm_start = self.warm_start
model = self.model
fit_intercept = self.fit_intercept
n_samples, n_features = X.shape
self.n_samples = n_samples
self.n_features = n_features
self._start_solve()
# Initialize coeffs to 0. which makes pi all equal to 0.5
if fit_intercept:
n_features += 1
coeffs = np.zeros(n_features)
coeffs_ext = np.zeros(2 * n_features)
func_obj = self._func_obj
func_sub_obj = self._func_sub_obj
grad_sub_obj = self._grad_sub_obj
# We initialize p0 and p1 by fitting a censoring mixture of geometrics
mixt_geoms = MixtureGeoms(max_iter=max_iter, verbose=False,
print_every=print_every, tol=tol)
mixt_geoms.fit(Y, delta, model)
p0 = mixt_geoms.p0
p1 = mixt_geoms.p1
pc = mixt_geoms.pc
self.p0, self.p1, self.pc = p0, p1, pc
if verbose:
print("init: p0=%s" % p0)
print("init: p1=%s" % p1)
obj = func_obj(X, Y, delta, coeffs_ext)
rel_obj = 1.
# Bounds vector for the L-BGFS-B algorithm
bounds = [(0, None)] * n_features * 2
for n_iter in range(max_iter):
if n_iter % print_every == 0:
self.history.update(n_iter=n_iter, obj=obj, rel_obj=rel_obj)
if verbose:
self.history.print_history()
pi = self.predict_proba(X, fit_intercept, coeffs)
# E-Step
a = ((1. - p1) ** (Y - 1.) * p1) ** delta * ((1. - p1) ** Y) ** (
1. - delta) * (1. - pi)
b = ((1. - p0) ** (Y - 1.) * p0) ** delta * ((1. - p0) ** Y) ** (
1. - delta) * pi
q = a / (a + b)
# M-Step
if model == 'C-mix':
p0 = ((1. - q) * delta).mean() / ((1. - q) * Y).mean()
p1 = (delta * q).mean() / (q * Y).mean()
self.p0, self.p1 = p0, p1
if warm_start:
x0 = coeffs_ext
else:
x0 = np.zeros(2 * n_features)
coeffs_ext = fmin_l_bfgs_b(
func=lambda coeffs_ext_: func_sub_obj(X, q, coeffs_ext_),
x0=x0,
fprime=lambda coeffs_ext_: grad_sub_obj(X, q, coeffs_ext_),
disp=False,
bounds=bounds,
maxiter=60,
# pgtol=1e-20
pgtol=1e-5
)[0]
coeffs = coeffs_ext[:n_features] - coeffs_ext[n_features:]
prev_obj = obj
obj = func_obj(X, Y, delta, coeffs_ext)
rel_obj = abs(obj - prev_obj) / abs(prev_obj)
if (n_iter > max_iter) or (rel_obj < tol):
break
n_iter += 1
self.history.update(n_iter=n_iter, obj=obj, rel_obj=rel_obj)
if verbose:
self.history.print_history()
print("At the end: p0=%s" % p0)
print("At the end: p1=%s" % p1)
self._end_solve()
self.p0, self.p1 = p0, p1
self.pi = pi
# self.coeffs = -coeffs
self.coeffs = coeffs
@staticmethod
def predict_proba(X, fit_intercept, coeffs):
"""Probability estimates for being on the high-risk group.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : `np.ndarray`, shape=(n_samples, n_features)
Input features matrix
fit_intercept : `bool`
If `True`, include an intercept in the model
coeffs : `np.ndarray`, shape=(n_features,)
The parameters of the mixture
Returns
-------
output : `np.ndarray`, shape=(n_samples,)
            The estimated probability, for each sample, of belonging to
            the high-risk group
"""
if fit_intercept:
coeffs_0 = coeffs[0]
coeffs = coeffs[1:]
else:
coeffs_0 = 0
u = coeffs_0 + X.dot(coeffs)
return QNEM.logistic_grad(u)
def score(self, X, Y, delta, metric):
"""Computes the score with the trained parameters on the given data,
either log-likelihood or C-index
Parameters
----------
X : `np.ndarray`, shape=(n_samples, n_features)
The features matrix
Y : `np.ndarray`, shape=(n_samples,)
Times of the event of interest
delta : `np.ndarray`, shape=(n_samples,)
Censoring indicator
metric : 'log_lik', 'C-index'
Either computes log-likelihood or C-index
Returns
-------
output : `float`
The score computed on the given data
"""
if metric == 'log_lik':
return self._log_lik(X, Y, delta)
if metric == 'C-index':
return c_index_score(Y, self.predict_proba(X, self.fit_intercept,
self.coeffs), delta)
def cross_validate(self, X, Y, delta, n_folds=3, eta=0.1,
adaptative_grid=True, grid_size=50,
grid_elastic_net=np.array([0]), shuffle=True,
verbose=True, metric='log_lik'):
"""Apply n_folds cross-validation using the given data, to select the
best penalization parameter
Parameters
----------
X : `np.ndarray`, shape=(n_samples, n_features)
The features matrix
Y : `np.ndarray`, shape=(n_samples,)
Times of the event of interest
delta : `np.ndarray`, shape=(n_samples,)
Censoring indicator
n_folds : `int`, default=3
Number of folds. Must be at least 2.
eta : `float`, default=0.1
The ElasticNet mixing parameter, with 0 <= eta <= 1.
For eta = 0 this is ridge (L2) regularization
For eta = 1 this is lasso (L1) regularization
For 0 < eta < 1, the regularization is a linear combination
of L1 and L2
adaptative_grid : `bool`, default=True
If `True`, adapt the ElasticNet strength parameter grid using the
KKT conditions
grid_size : `int`, default=50
Grid size if adaptative_grid=`True`
grid_elastic_net : `np.ndarray`, default=np.array([0])
Grid of ElasticNet strength parameters to be run through, if
adaptative_grid=`False`
shuffle : `bool`, default=True
Whether to shuffle the data before splitting into batches
verbose : `bool`, default=True
            If `True`, print progress information; otherwise the solver
            prints nothing (but records information in history anyway)
metric : 'log_lik', 'C-index', default='log_lik'
Either computes log-likelihood or C-index
"""
from sklearn.model_selection import KFold
n_samples = Y.shape[0]
cv = KFold(n_splits=n_folds, shuffle=shuffle)
self.grid_elastic_net = grid_elastic_net
self.adaptative_grid = adaptative_grid
self.grid_size = grid_size
tol = self.tol
warm_start = self.warm_start
model = self.model
if adaptative_grid:
# from KKT conditions
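            # The top of the grid is the smallest penalization strength at
            # which the KKT optimality conditions of the L1 part already force
            # every coefficient to zero; the grid then spans the four decades
            # below that value on a log10 scale.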
gamma_max = 1. / np.log(10.) * np.log(
1. / (1. - eta) * (.5 / n_samples)
* np.absolute(X).sum(axis=0).max())
grid_elastic_net = np.logspace(gamma_max - 4, gamma_max, grid_size)
learners = [
QNEM(verbose=False, tol=tol, eta=eta, warm_start=warm_start,
model=model, fit_intercept=self.fit_intercept)
for _ in range(n_folds)
]
n_grid_elastic_net = grid_elastic_net.shape[0]
scores = np.empty((n_grid_elastic_net, n_folds))
if verbose is not None:
verbose = self.verbose
for idx_elasticNet, l_elastic_net in enumerate(grid_elastic_net):
if verbose:
print("Testing l_elastic_net=%.2e" % l_elastic_net, "on fold ",
end="")
for n_fold, (idx_train, idx_test) in enumerate(cv.split(X)):
if verbose:
print(" " + str(n_fold), end="")
X_train, X_test = X[idx_train], X[idx_test]
Y_train, Y_test = Y[idx_train], Y[idx_test]
delta_train, delta_test = delta[idx_train], delta[idx_test]
learner = learners[n_fold]
learner.l_elastic_net = l_elastic_net
learner.fit(X_train, Y_train, delta_train)
scores[idx_elasticNet, n_fold] = learner.score(
X_test, Y_test, delta_test, metric)
if verbose:
print(": avg_score=%.2e" % scores[idx_elasticNet, :].mean())
avg_scores = scores.mean(1)
std_scores = scores.std(1)
idx_best = avg_scores.argmax()
l_elastic_net_best = grid_elastic_net[idx_best]
idx_chosen = max([i for i, j in enumerate(
list(avg_scores >= avg_scores.max() - std_scores[idx_best])) if j])
l_elastic_net_chosen = grid_elastic_net[idx_chosen]
self.grid_elastic_net = grid_elastic_net
self.l_elastic_net_best = l_elastic_net_best
self.l_elastic_net_chosen = l_elastic_net_chosen
self.scores = scores
self.avg_scores = avg_scores
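# Illustrative sketch (not part of the original module): a typical supervised
# run of the QNEM learner on data (X, Y, delta). Parameter values and names
# below are hypothetical.
def _qnem_usage_sketch(X, Y, delta):
    """Fit QNEM and return its C-index together with the risk marker."""
    learner = QNEM(l_elastic_net=0.05, eta=0.3, max_iter=50, verbose=False)
    learner.fit(X, Y, delta)
    marker = learner.predict_proba(X, learner.fit_intercept, learner.coeffs)
    return learner.score(X, Y, delta, metric='C-index'), marker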
| mit |
webmasterraj/GaSiProMo | flask/lib/python2.7/site-packages/pandas/computation/engines.py | 15 | 3732 | """Engine classes for :func:`~pandas.eval`
"""
import abc
from pandas import compat
from pandas.compat import DeepChainMap, map
from pandas.core import common as com
from pandas.computation.align import _align, _reconstruct_object
from pandas.computation.ops import UndefinedVariableError, _mathops, _reductions
_ne_builtins = frozenset(_mathops + _reductions)
class NumExprClobberingError(NameError):
pass
def _check_ne_builtin_clash(expr):
"""Attempt to prevent foot-shooting in a helpful way.
Parameters
----------
terms : Term
Terms can contain
"""
names = expr.names
overlap = names & _ne_builtins
if overlap:
s = ', '.join(map(repr, overlap))
raise NumExprClobberingError('Variables in expression "%s" overlap with '
'numexpr builtins: (%s)' % (expr, s))
class AbstractEngine(object):
"""Object serving as a base class for all engines."""
__metaclass__ = abc.ABCMeta
has_neg_frac = False
def __init__(self, expr):
self.expr = expr
self.aligned_axes = None
self.result_type = None
def convert(self):
"""Convert an expression for evaluation.
Defaults to return the expression as a string.
"""
return com.pprint_thing(self.expr)
def evaluate(self):
"""Run the engine on the expression
This method performs alignment which is necessary no matter what engine
is being used, thus its implementation is in the base class.
Returns
-------
obj : object
The result of the passed expression.
"""
if not self._is_aligned:
self.result_type, self.aligned_axes = _align(self.expr.terms)
# make sure no names in resolvers and locals/globals clash
res = self._evaluate()
return _reconstruct_object(self.result_type, res, self.aligned_axes,
self.expr.terms.return_type)
@property
def _is_aligned(self):
return self.aligned_axes is not None and self.result_type is not None
@abc.abstractmethod
def _evaluate(self):
"""Return an evaluated expression.
Parameters
----------
env : Scope
The local and global environment in which to evaluate an
expression.
Notes
-----
Must be implemented by subclasses.
"""
pass
class NumExprEngine(AbstractEngine):
"""NumExpr engine class"""
has_neg_frac = True
def __init__(self, expr):
super(NumExprEngine, self).__init__(expr)
def convert(self):
return str(super(NumExprEngine, self).convert())
def _evaluate(self):
import numexpr as ne
# convert the expression to a valid numexpr expression
s = self.convert()
try:
env = self.expr.env
scope = env.full_scope
truediv = scope['truediv']
_check_ne_builtin_clash(self.expr)
return ne.evaluate(s, local_dict=scope, truediv=truediv)
except KeyError as e:
# python 3 compat kludge
try:
msg = e.message
except AttributeError:
msg = compat.text_type(e)
raise UndefinedVariableError(msg)
class PythonEngine(AbstractEngine):
"""Evaluate an expression in Python space.
Mostly for testing purposes.
"""
has_neg_frac = False
def __init__(self, expr):
super(PythonEngine, self).__init__(expr)
def evaluate(self):
return self.expr()
def _evaluate(self):
pass
_engines = {'numexpr': NumExprEngine, 'python': PythonEngine}
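# Illustrative sketch (not part of the original module): these engine classes
# are normally reached through the public pandas.eval entry point rather than
# instantiated directly. The helper name is hypothetical.
def _engine_usage_sketch():
    """Evaluate the same expression with the default and the python engine."""
    import pandas as pd
    df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
    fast = pd.eval('df.a + df.b')                   # numexpr when available
    slow = pd.eval('df.a + df.b', engine='python')  # PythonEngine path
    return fast, slow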
| gpl-2.0 |
mdrumond/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 13 | 10426 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
from tempfile import gettempdir
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
# pylint: disable=redefined-outer-name
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
local_filename = os.path.join(gettempdir(), filename)
if not os.path.exists(local_filename):
local_filename, _ = urllib.request.urlretrieve(url + filename,
local_filename)
statinfo = os.stat(local_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception('Failed to verify ' + local_filename +
'. Can you get to it with a browser?')
return local_filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0: # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
# Filling 4 global variables:
# data - list of codes (integers from 0 to vocabulary_size-1).
# This is the original text but words are replaced by their codes
# count - list of [word, count] pairs for the most common words (incl. UNK)
# dictionary - map of words(strings) to their codes(integers)
# reverse_dictionary - maps codes(integers) to words(strings)
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
if data_index + span > len(data):
data_index = 0
buffer.extend(data[data_index:data_index + span])
data_index += span
for i in range(batch_size // num_skips):
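    # For each center word we emit num_skips (target, context) pairs; the
    # context words are drawn without replacement from the span-wide window
    # currently held in buffer, excluding the center position.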
context_words = [w for w in range(span) if w != skip_window]
words_to_use = random.sample(context_words, num_skips)
for j, context_word in enumerate(words_to_use):
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[context_word]
if data_index == len(data):
buffer[:] = data[:span]
data_index = span
else:
buffer.append(data[data_index])
data_index += 1
# Backtrack a little bit to avoid skipping words in the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
num_sampled = 64 # Number of negative examples to sample.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent. These 3 variables are used only for
# displaying model accuracy, they don't affect calculation.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
# Explanation of the meaning of NCE loss:
# http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
# pylint: disable=missing-docstring
# Function to draw visualization of distance between embeddings.
def plot_with_labels(low_dim_embs, labels, filename):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png'))
except ImportError as ex:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
print(ex)
| apache-2.0 |
belltailjp/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 213 | 11911 | import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
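# Illustrative sketch (not part of the original test suite): the basic Imputer
# workflow that the helper above verifies. The helper name is hypothetical.
def _imputer_usage_sketch():
    """Replace NaNs by column means: here 2. in column 0 and 5. in column 1."""
    X = np.array([[1., np.nan], [3., 4.], [np.nan, 6.]])
    imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
    return imp.fit_transform(X)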
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
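def _median_nan_behaviour_sketch():
    # Illustrative sketch (added, not part of the original scikit-learn suite):
    # np.median propagates NaN while np.ma.median ignores masked entries, which
    # is why the median checks above are restricted to NaN-free columns.
    a = np.array([1., 2., np.nan])
    plain = np.median(a)                            # nan
    masked = np.ma.median(np.ma.masked_invalid(a))  # 1.5
    return plain, masked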
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0]+1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
# frequent as promised in the doc but the lowest most frequent. When this
# test will fail after an update of scipy, Imputer will need to be updated
# to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
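def _mode_tie_breaking_sketch():
    # Illustrative sketch (added, not part of the original suite): on a tie,
    # scipy.stats.mode picks the lowest of the most frequent values -- the
    # behaviour that the comment in test_imputation_most_frequent relies on.
    # Exact return types vary across SciPy versions, so only the result object
    # is returned here.
    from scipy import stats
    result = stats.mode([1, 1, 2, 2])
    return result.mode, result.count  # lowest tied value (1) and its count (2)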
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = grid_search.GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_true(np.all(X == Xt))
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
| bsd-3-clause |
clingsz/GAE | gae_toy.py | 1 | 4152 | # -*- coding: utf-8 -*-
"""
Created on Wed May 10 12:38:40 2017
@author: Tianxiang Gao
"""
# analysis guided-auto-encoder
# 3 experiment
# 1) train cytokine using PCA and AE (cv error, visualization)
# 2) AE with different initialization
# 3) GAE the path, visualization
import misc.data_gen as dg
import misc.utils as utils
import gae.model.learner as learner
import matplotlib.pyplot as plt
import numpy
from sklearn.linear_model import ElasticNetCV
########################################################
# 0. Load the data
########################################################
immune_data = dg.load_immune(folds=5)
x,y = immune_data['x_train'],immune_data['y_train']
xv,yv = immune_data['x_test'],immune_data['y_test']
#%%
########################################################
# 1. Run the AE and PCA on the training dataset
########################################################
result_file_name = 'result/temp/gae_analysis_result.pkl'
LOADRESULT = True
if not LOADRESULT:
reses = []
for i in range(5):
res = learner.AE_train(x,y,alpha=0,randseed=i)
reses.append(res)
utils.saveobj(result_file_name,reses)
else:
reses = utils.loadobj(result_file_name)
pcares = learner.PCA_train(x,n_components=2)
reses = [pcares] + reses
#%%
########################################################
# 2. plot explained variance %
########################################################
rs = []
r_title = []
i = 0
for res in reses:
rs.append(learner.var_exp(x,res[0],res[1]))
r_title.append('AE' + str(i))
i += 1
r_title = ['PCA'] + r_title
plt.figure(figsize=[8,5])
plt.bar(numpy.arange(i)-0.5,rs)
plt.xticks(range(i),r_title)
plt.ylabel('Explained Variance')
plt.savefig('result/fig/pca_vs_ae_1.pdf', bbox_inches='tight')
plt.figure(figsize=[12,6])
i = 0
for i in range(6):
plt.subplot(2,3,i+1)
res = reses[i]
learner.visualizer(xv,res[0],res[1],r_title[i])
plt.tight_layout()
plt.savefig('result/fig/pca_vs_ae_2.pdf', bbox_inches='tight')
#%%
########################################################
# 3. The difference between codes
########################################################
cs = []
xx = numpy.concatenate([x,xv],axis=0)
L = len(reses)
for i in range(L):
c = reses[i][0](xx)
cs.append(c)
D = numpy.zeros([L,L])
for i in range(L):
for j in range(L):
D[i,j] = utils.linear_distance(cs[i],cs[j])
plt.imshow(1-D,aspect='auto',interpolation='none')
plt.colorbar()
plt.xticks(range(L),r_title)
plt.yticks(range(L),r_title)
plt.show()
#%%
########################################################
# 4. A GAE path
########################################################
alpha_list = numpy.arange(0,1.1,0.1)
result_file_name = 'result/temp/gae_analysis_result_gae.pkl'
LOADRESULT = True
if not LOADRESULT:
gaeres = []
for alpha in alpha_list:
res = learner.AE_train(x,y,alpha=alpha,randseed=0)
gaeres.append(res)
utils.saveobj(result_file_name,gaeres)
else:
gaeres = utils.loadobj(result_file_name)
allres = [pcares] + gaeres
alltit = ['PCA'] + ['GAE-' + str(a) for a in alpha_list]  # list comprehension also works on Python 3
recerrs = []
prederrs = []
for res in allres:
enc,dec = res
c = enc(xv)
x_new = dec(c)
recerrs.append(utils.mse(x_new,xv))
ct = enc(x)
enet = ElasticNetCV(random_state=0,cv=3)
enet.fit(ct,y.ravel())
yp = enet.predict(c)
prederrs.append(utils.mse(yp.ravel(),yv.ravel()))
plt.figure(figsize=[5,5])
for i in range(len(recerrs)):
xa,ya = recerrs[i],prederrs[i]
plt.scatter(xa,ya)
plt.text(xa,ya,alltit[i])
plt.xlabel('reconstruction loss')
plt.ylabel('prediction loss')
plt.savefig('result/fig/gae_path_toy.pdf', bbox_inches='tight')
plt.figure(figsize=[12,6])
jlst = [0,1,2,4,6,8]
for j in range(len(jlst)):
plt.subplot(2,3,j+1)
i = jlst[j]
res = allres[i]
learner.visualizer(x,res[0],res[1],alltit[i],y)
# c = allres[i][0](x)
# plt.scatter(c[:,0],c[:,1],c=y,cmap='hot',
# s=40,
# edgecolors='none',vmax=3,vmin=-3)
# plt.title(alltit[i])
plt.tight_layout()
plt.savefig('result/fig/gae_visual_toy.pdf', bbox_inches='tight')
| gpl-3.0 |
claudiusptolemy/ptolemy | python/triangulate_measure.py | 2 | 2147 | # triangulate_measure.py: Predict modern coordinates for places
# described by Ptolemy using a flocking (triangulation) model against
#   the places that have been suggested by other means.
import os
import logging
import pandas as pd
from sklearn.cross_validation import LeaveOneOut
from geopy.distance import vincenty
import sgdb
import geocode
from flocking import FlockingModel
PTOL_HOME = os.environ['PTOL_HOME']
logging.basicConfig(level='DEBUG')
KEY_PLACE_FIELDNAMES = [
'ptol_id',
'ptol_name',
'ptol_lat',
'ptol_lon',
'modern_name']
X_NAMES = ['ptol_lat', 'ptol_lon']
Y_NAMES = ['modern_lat', 'modern_lon']
P_NAMES = ['pred_lat', 'pred_lon']
# book 7 contains India
# chapter 1 is within the Ganges
TARGET_BOOK = '7.01'
places = sgdb.read_places().drop_duplicates('ptol_id')
places.reindex(columns=['ptol_id'])
places = places.loc[pd.notnull(places.ptol_lat), :]
places = places.loc[:, KEY_PLACE_FIELDNAMES]
places = places.loc[places.ptol_id.str.startswith(TARGET_BOOK), :]
places = pd.merge(places, geocode.read_geocodes(), how='left')
known = places.loc[pd.notnull(places.modern_lat), :]
known.is_copy = False
loo = LeaveOneOut(len(known))
for train, test in loo:
trainx = known.iloc[train, :].loc[:, X_NAMES]
trainy = known.iloc[train, :].loc[:, Y_NAMES]
testx = known.iloc[test, :].loc[:, X_NAMES]
model = FlockingModel()
model.fit(trainx, trainy)
testy = model.predict(testx)
known.loc[known.iloc[test,:].index, 'pred_lat'] = testy[0][0]
known.loc[known.iloc[test,:].index, 'pred_lon'] = testy[0][1]
for i, p in known.iterrows():
lat_err = p.modern_lat - p.pred_lat
lon_err = p.modern_lon - p.pred_lon
sq_err = lat_err ** 2 + lon_err ** 2
modern_coords = (p.modern_lat, p.modern_lon)
pred_coords = (p.pred_lat, p.pred_lon)
dist_err = vincenty(modern_coords, pred_coords).miles
known.loc[i, 'lat_err'] = lat_err
known.loc[i, 'lon_err'] = lon_err
known.loc[i, 'sq_err'] = sq_err
known.loc[i, 'dist_err'] = dist_err
known.to_csv('../Data/flocking_measure.csv', encoding='cp1252')
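# Descriptive note (added): lat_err/lon_err and sq_err are expressed in degrees
# (and squared degrees), while dist_err uses geopy's Vincenty geodesic distance
# in miles between the suggested modern coordinates and the model's prediction.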
| gpl-2.0 |
startcode/apollo | modules/tools/calibration/plot_results.py | 2 | 2535 | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import math
import sys
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm as cmx
from matplotlib import colors as mcolors
markers = [
"o", "v", "^", "<", ">", "1", "2", "3", "4", "8", "s", "p", "*", "+", "x",
"d", "|", "_"
]
if len(sys.argv) < 2:
print "usage: python plot_results.py result.csv"
f = open(sys.argv[1], 'r')
cmd_table = {}
for line in f:
items = line.split(',')
cmd = round(float(items[0]))
speed = float(items[1])
acc = float(items[2])
if cmd in cmd_table:
speed_table = cmd_table[cmd]
if speed in speed_table:
speed_table[speed].append(acc)
else:
speed_table[speed] = [acc]
else:
speed_table = {}
speed_table[speed] = [acc]
cmd_table[cmd] = speed_table
f.close()
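# Descriptive note (added): after parsing, cmd_table is a nested mapping of the
# form {cmd: {speed: [acc, acc, ...]}}, assuming each CSV row is "cmd,speed,acc";
# e.g. a row "20.0,1.5,0.3" appends 0.3 to cmd_table[20.0][1.5].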
NCURVES = len(cmd_table)
np.random.seed(101)
curves = [np.random.random(20) for i in range(NCURVES)]
values = range(NCURVES)
jet = cm = plt.get_cmap('brg')
cNorm = mcolors.Normalize(vmin=0, vmax=values[-1])
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
cnt = 0
cmds = cmd_table.keys()
cmds.sort()
fig, ax = plt.subplots()
for cmd in cmds:
print "ctrl cmd = ", cmd
speed_table = cmd_table[cmd]
X = []
Y = []
speeds = speed_table.keys()
speeds.sort()
for speed in speeds:
X.append(speed)
Y.append(np.mean(speed_table[speed]))
colorVal = scalarMap.to_rgba(values[cnt])
ax.plot(
X,
Y,
c=colorVal,
linestyle=':',
marker=markers[cnt % len(markers)],
label="cmd=" + str(cmd))
cnt += 1
ax.legend(loc='upper center', shadow=True, bbox_to_anchor=(0.5, 1.1), ncol=5)
plt.ylabel("acc")
plt.xlabel("speed")
plt.grid()
plt.show()
| apache-2.0 |
RohitMetaCube/test_code | job_normalization_api.py | 1 | 20366 | import cherrypy
from skillset_extractor import nounphrase_extractor
from db_utils import DBUtils
from configurator import configurator
import time
from similarity_function import similarity_finder
from utils import format_skills, convert_encoding, find_all_ngrams_upto, create_lay_title_dict_and_lower_list
import nltk
from sklearn.externals import joblib
from filter_chain import filter_chain
from health_check import health_check
import os
from os.path import join
from os import listdir, rmdir
from shutil import move
import logging
from log_utils import OneLineExceptionFormatter
from SOCClassifier import SOCClassifierFactory
from utils import create_key
from norm_location_finder import create_location_maps
import csv
def save_to_file(filename,
context,
folder_path=configurator.commons.JOB_API_INIT_FILES_PATH):
if folder_path.strip():
if not os.path.exists(folder_path):
os.makedirs(folder_path)
if folder_path.strip()[-1] != '/':
folder_path = folder_path.strip() + "/"
joblib.dump(context, folder_path + filename + "." +
configurator.commons.MODEL_FILE_EXTENSION)
return (" file <{}> successfully saved".format(
folder_path + filename + "." +
configurator.commons.MODEL_FILE_EXTENSION))
def load_file(filename,
folder_path=configurator.commons.JOB_API_INIT_FILES_PATH):
if folder_path.strip() and folder_path.strip()[-1] != '/':
folder_path = folder_path.strip() + "/"
return joblib.load(folder_path + filename + "." +
configurator.commons.MODEL_FILE_EXTENSION)
class norm_job(object):
api_start_time = time.time()
SKILLSET_SIZE = 30
CONFIDENCE_THRESHOLD = 75
dbutils = DBUtils(configurator.commons.MONGODB_HOST)
universal_skill_set = dbutils.create_resume_posting_universal_skill_set(
SKILLSET_SIZE)
ngram_limit = 1
npe = nounphrase_extractor()
sf = similarity_finder()
JOBS_PARAMETER = "jobs"
JOB_TITLE_PARAMETER = "title"
JOB_DESCRIPTION_PARAMETER = "description"
PREVIOUS_JOB_TITLE_PARAMETER = "previous_title"
PREVIOUS_JOB_DESCRIPTION_PARAMETER = "previous_description"
SOC_HINT_PARAMETER = "soc_hint"
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
for skill in universal_skill_set:
l = len(skill.split())
if l > ngram_limit:
ngram_limit = l
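    # Descriptive note (added): ngram_limit ends up as the token length of the
    # longest phrase in universal_skill_set, so the description scan in
    # normalize() only needs n-grams up to that size when matching skills.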
def __init__(self):
LAY_TITLE_LIST_NAME = 'lay_title_list'
LAY_TITLE_DICT_NAME = 'lay_title_dict'
SIMILAR_TITLE_DICT_NAME = 'title_to_similar_title_dict'
CITY_LIST = 'city_list'
STATE_LIST = 'state_list'
STATE_CODES = 'state_codes'
SOC_MASTER_DICT_NAME = 'soc_master_dict'
SOC_MAPPING_NAME = 'soc_mapping'
SIMILAR_DICT_NAME = 'similar_title_dict'
try:
self.soc_master_dict = load_file(SOC_MASTER_DICT_NAME)
self.soc_mapping = load_file(SOC_MAPPING_NAME)
self.similar_title_dict = load_file(SIMILAR_DICT_NAME)
self.lay_title_list = load_file(LAY_TITLE_LIST_NAME)
self.title_to_similar_title_dict = load_file(
SIMILAR_TITLE_DICT_NAME)
self.lay_title_dict = load_file(LAY_TITLE_DICT_NAME)
self.city_list = load_file(CITY_LIST)
self.state_list = load_file(STATE_LIST)
self.state_codes = load_file(STATE_CODES)
except:
[self.soc_master_dict, self.lay_title_list, self.soc_mapping
] = norm_job.dbutils.create_all_lay_title_mappings()
[self.similar_title_dict, self.title_to_similar_title_dict
] = norm_job.dbutils.create_all_similar_title_mappings()
[self.lay_title_dict, self.lay_title_list
] = create_lay_title_dict_and_lower_list(self.lay_title_list)
res = create_lay_title_dict_and_lower_list(
self.title_to_similar_title_dict, stem_key=True)
self.lay_title_dict.update(res[0])
self.title_to_similar_title_dict = res[1]
del res
results = create_location_maps(norm_job.dbutils)
self.city_list = results["city_list"]
self.state_list = results["state_list"]
self.state_codes = results["state_codes"]
try:
folder = configurator.commons.JOB_API_INIT_FILES_PATH
if folder:
if folder.strip()[-1] != '/':
folder = folder.strip() + "/"
if not os.path.exists(folder):
os.makedirs(folder)
temp_folder_path = folder + str(os.getpid()) + "_" + str(
time.time())
try:
logging.info(
save_to_file(LAY_TITLE_DICT_NAME, self.lay_title_dict,
temp_folder_path))
logging.info(
save_to_file(LAY_TITLE_LIST_NAME, self.lay_title_list,
temp_folder_path))
logging.info(
save_to_file(SIMILAR_TITLE_DICT_NAME,
self.title_to_similar_title_dict,
temp_folder_path))
logging.info(
save_to_file(CITY_LIST, self.city_list,
temp_folder_path))
logging.info(
save_to_file(STATE_LIST, self.state_list,
temp_folder_path))
logging.info(
save_to_file(STATE_CODES, self.state_codes,
temp_folder_path))
logging.info(
save_to_file(SOC_MASTER_DICT_NAME,
self.soc_master_dict, temp_folder_path))
logging.info(
save_to_file(SOC_MAPPING_NAME, self.soc_mapping,
temp_folder_path))
logging.info(
save_to_file(SIMILAR_DICT_NAME, self.
similar_title_dict, temp_folder_path))
except Exception as e:
root.exception(e)
# if folder exists (may be due to parallel processes) then remove current temporary folder
''' Rename directory '''
if os.path.exists(temp_folder_path):
for filename in listdir(join(folder, temp_folder_path)):
move(
join(folder, temp_folder_path, filename),
join(folder, filename))
rmdir(temp_folder_path)
except Exception as e:
root.exception(e)
pass
remove_cities = [
'teller', 'home', 'cook', 'grill', 'helper', 'industrial', 'mobile'
]
for city in remove_cities:
if city in self.city_list:
del self.city_list[city]
self.soc_lay_title_token_list = {}
for soc, lts in self.lay_title_list.items():
self.soc_lay_title_token_list[soc] = {}
for lt in lts:
for token in set(lt.split()):
if token not in self.soc_lay_title_token_list[soc]:
self.soc_lay_title_token_list[soc][token] = set()
self.soc_lay_title_token_list[soc][token].add(lt)
if lt in self.title_to_similar_title_dict:
for st in self.title_to_similar_title_dict[lt]:
for token in set(st.split()):
if token not in self.soc_lay_title_token_list[soc]:
self.soc_lay_title_token_list[soc][
token] = set()
self.soc_lay_title_token_list[soc][token].add(st)
'''Load Model'''
try:
self.model = SOCClassifierFactory.create_classifier(
configurator.commons.JOB_POSTING_CLF_NAME)
except Exception as e:
root.exception(e)
exit()
f = open('dictionaries/selected_ngrams_for_driver.csv', 'rb')
fr = csv.reader(f, delimiter='\t')
self.driver_ngrams_set = set(
[row[0] for row in fr if (row[0] and row[0].strip())])
ltm_cursor = norm_job.dbutils.fetch_data(
configurator.commons.LAY_TITLE_MASTER, 'cursor',
{'soc_code': {
'$regex': '^53-'
}}, {'soc_code': 1})
self.driver_soc_codes = [(ltm_elem['soc_code'], 100)
for ltm_elem in ltm_cursor]
root.info("API Start Time= {}s".format(time.time() -
norm_job.api_start_time))
def extract_soc_code(self, text, prefix, suffix):
if text.startswith(prefix) and text.endswith(suffix):
return text[len(prefix):-len(suffix)]
return None
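    # Illustrative usage (hypothetical values): with text "soc_15-1132_v1",
    # prefix "soc_" and suffix "_v1", extract_soc_code returns "15-1132";
    # if either affix does not match, it returns None.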
@staticmethod
def fetch_closest_lay_title(lay_title_list, soc_lay_title_token_list,
soc_code_tuple, job_title, job_description):
closest_lay_title = configurator.commons.DEFAULT_CLOSEST_LAY_TITLE
default_soc = soc_code_tuple[0]
top_soc = soc_code_tuple[0]
valid_lay_titles = set()
tokens = job_title.split()
for soc_code in soc_code_tuple:
if soc_code in soc_lay_title_token_list:
for token in tokens:
if token in soc_lay_title_token_list[soc_code]:
valid_lay_titles = valid_lay_titles.union(
soc_lay_title_token_list[soc_code][token])
if len(valid_lay_titles):
closest_lay_title = norm_job.sf.find_closest_lay_title(
valid_lay_titles, job_title, job_description)
for soc in soc_code_tuple:
if soc in lay_title_list and closest_lay_title in set(
lay_title_list[soc]):
default_soc = soc
break
return closest_lay_title, default_soc, top_soc
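    # Descriptive note (added): the returned tuple is (best matching lay title,
    # the SOC code whose lay-title list contains that match, the top-ranked SOC
    # code); the first entry of soc_code_tuple serves as the default for both
    # SOC values when no better match is found.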
@cherrypy.expose
@cherrypy.tools.json_out()
@cherrypy.tools.json_in()
def normalize(self, **other_params):
cherrypy.response.headers['Content-Type'] = "application/json"
params = {}
if cherrypy.request.method == "POST":
params = cherrypy.request.json
error_message = str()
error_flag = False
job_description = ""
batch_size = 0
total_time = time.time()
if norm_job.JOBS_PARAMETER not in params:
error_flag = True
error_message = configurator.commons.MALFORMED_REQUEST_ERROR_MESSAGE
else:
jobs = params[norm_job.JOBS_PARAMETER]
job_array = []
skill_array = []
responses = []
bypass_array = []
batch_size = len(jobs)
for job in jobs:
try:
filtered_title = job[norm_job.JOB_TITLE_PARAMETER]
if "instead of" in filtered_title.lower():
filtered_title = filtered_title[:filtered_title.lower(
).find("instead of")].strip()
filtered_title = create_key(filtered_title, self.city_list,
self.state_list,
self.state_codes)
job[norm_job.JOB_TITLE_PARAMETER] = filtered_title
except:
filtered_title = ""
job_description = ""
if norm_job.JOB_DESCRIPTION_PARAMETER in job:
job_description = job[norm_job.JOB_DESCRIPTION_PARAMETER]
title_ngrams = find_all_ngrams_upto(filtered_title.lower(), 4)
if title_ngrams.intersection(self.driver_ngrams_set):
bypass_array.append(1)
else:
job_array.append((filtered_title, job_description))
bypass_array.append(0)
imp_skills = set()
if job_description:
sentences = norm_job.sent_detector.tokenize(
job_description)
for sentence in sentences:
lower_sentence = sentence.lower()
sentence_n_grams = find_all_ngrams_upto(
lower_sentence, norm_job.ngram_limit)
imp_skills.update(
sentence_n_grams.intersection(
norm_job.universal_skill_set))
skill_array.append(imp_skills)
start_time = time.time()
prediction_array = self.model.predict(job_array)
root.info(
"Context Free classification for {0} points done in {1}s".
format(len(prediction_array), time.time() - start_time))
del job_array
# root.info(prediction_array)
start_time = time.time()
for point_index, selector_value in enumerate(bypass_array):
if selector_value:
soc_codes_with_conf = self.driver_soc_codes
else:
soc_codes_with_conf = prediction_array.pop(0)
soc_codes = [
soc[0]
for soc in sorted(
soc_codes_with_conf, key=lambda k: k[1], reverse=True)
]
try:
job_title = jobs[point_index][norm_job.JOB_TITLE_PARAMETER]
if "instead of" in job_title.lower():
job_title = job_title[:job_title.lower().find(
"instead of")].strip()
except:
error_flag = True
error_message = configurator.commons.MALFORMED_REQUEST_ERROR_MESSAGE
if not error_flag:
response_json = {}
response_json["index"] = point_index
response_json["clean_original_title"] = format_skills(jobs[
point_index][norm_job.JOB_TITLE_PARAMETER])
response_json["soc_code"] = ''
response_json["confidence"] = 0
response_json["closest_lay_title"] = ''
response_json["major_group_string"] = ''
response_json["skills"] = list(skill_array[point_index])
if not soc_codes:
''' The given job posting could not be normalized using our standard algorithm.
We should use the soc_hint parameter present here to see if we can find a nearby
title in the given hint SOC code.'''
if norm_job.SOC_HINT_PARAMETER in jobs[point_index]:
soc_hint = jobs[point_index][
norm_job.SOC_HINT_PARAMETER]
if soc_hint in self.soc_mapping:
''' This is a valid SOC Code '''
associated_soc_codes = self.soc_mapping[
soc_hint]
soc_codes = list(associated_soc_codes)
root.info(
"Hinted {} hence, Comparing Against Codes {}".
format(soc_hint, soc_codes))
else:
''' This is an invalid SOC Code and we can't do much about it. '''
root.info(
"No matching SOC Code found in soc_hint {}. Cannot normalize.".
format(soc_hint))
if soc_codes:
key_string = filter_chain.apply(
convert_encoding(job_title), is_title=True)[1]
closest_lay_title_tuple = norm_job.fetch_closest_lay_title(
self.lay_title_list, self.soc_lay_title_token_list,
soc_codes, key_string, "")
major_group_string = configurator.commons.DEFAULT_MAJOR_GROUP_STRING
if closest_lay_title_tuple[1] in self.soc_master_dict:
major_group_string = self.soc_master_dict[
closest_lay_title_tuple[1]][
'major_group_string']
lay_title = convert_encoding(closest_lay_title_tuple[
0])
if lay_title in self.lay_title_dict:
lay_title = self.lay_title_dict[lay_title]
if lay_title in self.similar_title_dict:
lay_title = self.similar_title_dict[lay_title]
response_json["soc_code"] = closest_lay_title_tuple[1]
response_json["confidence"] = int(
dict(soc_codes_with_conf)[closest_lay_title_tuple[
1]])
response_json['top_soc'] = closest_lay_title_tuple[2]
response_json["closest_lay_title"] = lay_title
response_json[
"major_group_string"] = major_group_string
else:
response_json = {
"error_code":
configurator.commons.MALFORMED_REQUEST_ERROR_STATUS,
"message": error_message
}
responses.append(response_json)
error_flag = False
if (point_index + 1) % 1000 == 0:
root.info("{0} points done in {1}s".format(
point_index, time.time() - start_time))
start_time = time.time()
responses_object = {"normalized_jobs": responses}
if error_flag:
cherrypy.response.status = configurator.commons.MALFORMED_REQUEST_ERROR_STATUS
responses_object = {
"error_code":
configurator.commons.MALFORMED_REQUEST_ERROR_STATUS,
"message": error_message
}
root.info("{0} points done in {1}s".format(batch_size,
time.time() - total_time))
return responses_object
''' Initializing the web server '''
if __name__ == '__main__':
logging_handler = logging.StreamHandler()
log_format = OneLineExceptionFormatter(
configurator.commons.LOG_FORMAT_STRING,
configurator.commons.LOG_TIME_FORMAT)
logging_handler.setFormatter(log_format)
root = logging.getLogger()
root.setLevel(logging.DEBUG)
root.addHandler(logging_handler)
cherrypy.config.update({
'server.socket_host': '0.0.0.0',
'server.socket_port': configurator.commons.JOB_NORMALIZATION_API_PORT,
'server.thread_pool':
configurator.commons.JOB_NORMALIZATION_API_THREADS,
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'response.timeout':
configurator.commons.JOB_NORMALIZATION_API_RESPONSE_TIMEOUT,
'server.socket_queue_size':
configurator.commons.JOB_NORMALIZATION_API_SOCKET_QUEUE_SIZE,
'engine.timeout_monitor.on': False,
'log.screen': False,
'log.access_file': '',
'log.error_log_propagate': False,
        'log.access_log.propagate': False,
'log.error_file': ''
})
cherrypy.tree.mount(
norm_job(),
configurator.commons.JOB_NORMALIZATION_API_CONTEXT,
config={'/': {}})
cherrypy.tree.mount(
health_check(),
configurator.commons.HEARTBEAT_CONTEXT,
config={'/': {}})
cherrypy.engine.start()
cherrypy.engine.block()
| gpl-3.0 |
wazeerzulfikar/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 55 | 9939 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import (
assert_almost_equal, assert_greater, assert_less, raises,
)
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
with open(os.devnull, 'w') as devnull:
sys.stdout = devnull
sys.stderr = devnull
yield
devnull.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
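# Illustrative helper (added sketch, not part of the original test suite): the
# context manager above can wrap any call that prints to stdout/stderr, for
# example a verbose estimator fit.
def _silent_fit_sketch(estimator, X, y):
    with no_stdout_stderr():
        estimator.fit(X, y)
    return estimator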
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1 / (np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exact the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
ZENGXH/scikit-learn | sklearn/utils/tests/test_murmurhash.py | 261 | 2836 | # Author: Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
h = murmurhash3_32(' ' * i, 0)
assert_true(h not in previous_hashes,
"Found collision on growing empty string")
def test_uniform_distribution():
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
| bsd-3-clause |
Pinafore/qb | figures.py | 2 | 26133 | #!/usr/bin/env python
# pylint: disable=wrong-import-position
import os
import json
import sys
import pickle
from typing import List
if "DISPLAY" not in os.environ:
import matplotlib
matplotlib.use("agg")
import glob
import click
import pandas as pd
from pandas.api.types import CategoricalDtype
import numpy as np
from scipy.stats import binned_statistic
from plotnine import (
ggplot,
aes,
facet_wrap,
ggtitle,
labeller,
geom_smooth,
geom_density,
geom_histogram,
geom_bar,
geom_line,
geom_point,
geom_errorbar,
geom_errorbarh,
stat_summary_bin,
coord_flip,
stat_smooth,
scale_y_continuous,
scale_x_continuous,
xlab,
ylab,
theme,
element_text,
element_blank,
stat_ecdf,
ylim,
scale_color_manual,
scale_color_discrete,
coord_cartesian,
)
QB_ROOT = os.environ.get("QB_ROOT", "")
DEV_REPORT_PATTERN = os.path.join(
QB_ROOT, "output/guesser/best/**/guesser_report_guessdev.pickle"
)
TEST_REPORT_PATTERN = os.path.join(
QB_ROOT, "output/guesser/best/**/guesser_report_guesstest.pickle"
)
EXPO_REPORT_PATTERN = os.path.join(
QB_ROOT, "output/guesser/best/**/guesser_report_expo.pickle"
)
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
@click.group()
def main():
pass
def safe_path(path):
os.makedirs(os.path.dirname(path), exist_ok=True)
return path
def category_jmlr(cat):
if cat in {"Religion", "Myth", "Philosophy"}:
return "Religion/Myth/Philosophy"
elif cat == "Trash":
return "Popular Culture"
else:
return cat
def int_to_correct(num):
if num == 1:
return "Correct"
else:
return "Wrong"
def save_plot(output_dir, guesser_name, name, plot, width=None, height=None):
plot.save(
safe_path(os.path.join(output_dir, guesser_name, name)),
width=width,
height=height,
)
class GuesserReport:
def __init__(self, unpickled_report, fold):
self.fold = fold
self.char_df = unpickled_report["char_df"]
self.first_df = unpickled_report["first_df"]
self.full_df = unpickled_report["full_df"]
self.guesser_name = unpickled_report["guesser_name"]
self.full_df["seen"] = "Full Question"
self.first_df["seen"] = "First Sentence"
self.combined_df = pd.concat([self.full_df, self.first_df])
self.combined_df["Outcome"] = self.combined_df.correct.map(int_to_correct)
self.combined_df["category_jmlr"] = self.combined_df.category.map(category_jmlr)
self.combined_df = (
self.combined_df.groupby(["qanta_id", "seen"]).nth(0).reset_index()
)
self.char_plot_df = (
self.char_df.sort_values("score", ascending=False)
.groupby(["qanta_id", "char_index"])
.nth(0)
.reset_index()
)
self.char_plot_df["category_jmlr"] = self.char_plot_df.category.map(
category_jmlr
)
self.char_plot_df["Outcome"] = self.char_plot_df.correct.map(int_to_correct)
self.first_accuracy = unpickled_report["first_accuracy"]
self.full_accuracy = unpickled_report["full_accuracy"]
self.unanswerable_answer_percent = unpickled_report[
"unanswerable_answer_percent"
]
self.unanswerable_question_percent = unpickled_report[
"unanswerable_question_percent"
]
def plot_n_train_vs_accuracy(self):
return (
ggplot(self.combined_df)
+ facet_wrap("seen")
+ aes(x="n_train", fill="Outcome")
+ geom_histogram(binwidth=1)
)
def plot_char_percent_vs_accuracy_histogram(self, category=False):
if category:
return (
ggplot(self.char_plot_df)
+ facet_wrap("category_jmlr")
+ aes(x="char_percent", fill="Outcome")
+ geom_histogram(binwidth=0.05)
)
else:
return (
ggplot(self.char_plot_df)
+ aes(x="char_percent", fill="Outcome")
+ geom_histogram(binwidth=0.05)
)
def plot_char_percent_vs_accuracy_smooth(self, category=False):
if category:
return (
ggplot(self.char_plot_df)
+ aes(x="char_percent", y="correct", color="category_jmlr")
+ geom_smooth()
)
else:
return (
ggplot(self.char_plot_df)
+ aes(x="char_percent", y="correct")
+ geom_smooth(method="mavg")
)
GUESSER_SHORT_NAMES = {
"qanta.guesser.rnn.RnnGuesser": " RNN",
"qanta.guesser.dan.DanGuesser": " DAN",
"qanta.guesser.elasticsearch.ElasticSearchGuesser": "IR",
}
def to_shortname(name):
if name in GUESSER_SHORT_NAMES:
return GUESSER_SHORT_NAMES[name]
else:
return name
def to_dataset(fold):
if fold == "expo":
return "Challenge Questions"
elif fold == "guesstest":
return "Regular Test"
else:
return fold
def label_source(original):
if original == "es":
return "Round 1 - IR Adversarial"
elif original == "rnn":
return "Round 2 - RNN Adversarial"
elif original == "rnn-exact":
return "Round 2 - Exact-RNN Adversarial"
elif original == "rnn-noexact":
return "Round 2 - NoExact-RNN Adversarial"
elif original == "rnn-paired":
return "Round 2 - Paired-RNN Adversarial"
elif original == "rnn-nopaired":
return "Round 2 - NoPaired-RNN Adversarial"
elif original == "rnn-sheet":
return "Round 2 - Sheet-RNN Adversarial"
elif original == "rnn-nosheet":
return "Round 2 - NoSheet-RNN Adversarial"
elif original == "rnn-packet34":
return "Round 2 - P34-RNN Adversarial"
elif original == "es-2":
return "Round 2 - IR Adversarial"
else:
raise ValueError(f"unknown source: {original}")
def mean_no_se(series, mult=1):
m = np.mean(series)
se = mult * np.sqrt(np.var(series) / len(series))
return pd.DataFrame({"y": [m], "ymin": m, "ymax": m})
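# Descriptive note (added): mean_no_se collapses the error band to the mean
# itself (ymin == ymax == y), so stat_summary_bin draws plain points without
# visible error bars; the computed `se` and the `mult` argument are unused.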
def sort_humans(humans):
def order(h):
if "Intermediate" in h:
return -1
elif "Expert" in h:
return 0
elif "National" in h:
return 1
return sorted(humans, key=order)
ENABLE_EDIT_INFO = False
class CompareGuesserReport:
def __init__(
self,
reports: List[GuesserReport],
mvg_avg_char=False,
exclude_zero_train=False,
merge_humans=False,
no_humans=False,
rounds="1,2",
title="",
y_max=None,
save_df=None,
):
self.save_df = save_df
self.y_max = y_max
self.rounds = {int(n) for n in rounds.split(",")}
self.title = title
self.mvg_avg_char = mvg_avg_char
self.reports = reports
self.exclude_zero_train = exclude_zero_train
self.merge_humans = merge_humans
self.no_humans = no_humans
char_plot_dfs = []
acc_rows = []
for r in self.reports:
char_plot_dfs.append(r.char_plot_df)
name = to_shortname(r.guesser_name)
dataset = to_dataset(r.fold)
acc_rows.append((r.fold, name, "First Sentence", r.first_accuracy, dataset))
acc_rows.append((r.fold, name, "Full Question", r.full_accuracy, dataset))
self.char_plot_df = pd.concat(char_plot_dfs)
if self.exclude_zero_train:
self.char_plot_df = self.char_plot_df[self.char_plot_df.n_train > 0]
self.char_plot_df["Guessing_Model"] = self.char_plot_df["guesser"].map(
to_shortname
)
self.char_plot_df["Dataset"] = self.char_plot_df["fold"].map(to_dataset)
self.char_plot_df["source"] = "unknown"
if os.path.exists("data/external/datasets/merged_trickme-id-model.json"):
if ENABLE_EDIT_INFO and os.path.exists("output/tacl/edit_info.json"):
with open("output/tacl/edit_info.json") as f:
eprint("Using output/tacl/edit_info.json")
edit_info = {e["post_qanta_id"]: e for e in json.load(f)}
trick_to_email = None
elif os.path.exists("output/tacl/tacl-spreadsheet.tsv"):
with open("data/external/datasets/qanta.tacl-trick.json") as f:
trick_to_qanta_id = {
q["trick_id"]: q["qanta_id"]
for q in json.load(f)["questions"]
if q["trick_id"] is not None
}
eprint("Using output/tacl/tacl-spreadsheet.tsv")
sheet_df = pd.read_csv(
"output/tacl/tacl-spreadsheet.tsv",
header=0,
sep="\t",
index_col=None,
)
sheet_df = sheet_df.rename(
columns={"ID": "trick_id", "Author (by e-mail)": "email"}
)
sheet_df = sheet_df[["trick_id", "email"]]
trick_to_email = {}
for t in sheet_df.itertuples():
if t.trick_id not in trick_to_qanta_id:
raise ValueError(f"Trick id not in qanta id map: {t.trick_id}")
trick_to_email[trick_to_qanta_id[t.trick_id]] = t.email
edit_info = None
else:
edit_info = None
trick_to_email = None
eprint("Separating questions into rnn/es")
with open("data/external/datasets/merged_trickme-id-model.json") as f:
trick_sources = json.load(f)
id_rows = []
for sqid, source in trick_sources.items():
sqid = int(sqid)
if edit_info is not None:
# Only update rnn source for debugging
if source == "rnn":
if sqid in edit_info:
if edit_info[sqid]["exact"]:
source = "rnn-exact"
else:
source = "rnn-noexact"
else:
raise ValueError(f"No edit info for: {sqid}")
elif trick_to_email is not None:
if sqid in trick_to_email:
if trick_to_email[sqid] in (
"[email protected]",
"[email protected]",
):
source = "rnn-packet34"
id_rows.append({"qanta_id": sqid, "source": source, "fold": "expo"})
id_df = pd.DataFrame(id_rows)
self.char_plot_df = self.char_plot_df.merge(
id_df, on=("qanta_id", "fold"), how="left"
)
self.char_plot_df["source"] = self.char_plot_df["source_y"].fillna(
"unknown"
)
if trick_to_email is not None:
eprint(
f"N Questions pre filter: {len(self.char_plot_df.qanta_id.unique())}"
)
self.char_plot_df = self.char_plot_df[
self.char_plot_df["source"] != "rnn-packet34"
]
eprint(
f"N Questions post filter: {len(self.char_plot_df.qanta_id.unique())}"
)
self.char_plot_df.loc[
self.char_plot_df.source != "unknown", "Dataset"
] = self.char_plot_df[self.char_plot_df.source != "unknown"][
"source"
].map(
label_source
)
self.acc_df = pd.DataFrame.from_records(
acc_rows, columns=["fold", "guesser", "position", "accuracy", "Dataset"]
)
def plot_char_percent_vs_accuracy_smooth(
self, expo=False, no_models=False, columns=False
):
if self.y_max is not None:
limits = [0, float(self.y_max)]
eprint(f"Setting limits to: {limits}")
else:
limits = [0, 1]
if expo:
if (
os.path.exists("data/external/all_human_gameplay.json")
and not self.no_humans
):
with open("data/external/all_human_gameplay.json") as f:
all_gameplay = json.load(f)
frames = []
for event, name in [
("parents", "Intermediate"),
("maryland", "Expert"),
("live", "National"),
]:
if self.merge_humans:
name = "Human"
gameplay = all_gameplay[event]
if event != "live":
control_correct_positions = gameplay[
"control_correct_positions"
]
control_wrong_positions = gameplay[
"control_wrong_positions"
]
control_positions = (
control_correct_positions + control_wrong_positions
)
control_positions = np.array(control_positions)
control_result = np.array(
len(control_correct_positions) * [1]
+ len(control_wrong_positions) * [0]
)
argsort_control = np.argsort(control_positions)
control_x = control_positions[argsort_control]
control_sorted_result = control_result[argsort_control]
control_y = (
control_sorted_result.cumsum()
/ control_sorted_result.shape[0]
)
control_df = pd.DataFrame(
{"correct": control_y, "char_percent": control_x}
)
control_df["Dataset"] = "Regular Test"
control_df["Guessing_Model"] = f" {name}"
frames.append(control_df)
adv_correct_positions = gameplay["adv_correct_positions"]
adv_wrong_positions = gameplay["adv_wrong_positions"]
adv_positions = adv_correct_positions + adv_wrong_positions
adv_positions = np.array(adv_positions)
adv_result = np.array(
len(adv_correct_positions) * [1]
+ len(adv_wrong_positions) * [0]
)
argsort_adv = np.argsort(adv_positions)
adv_x = adv_positions[argsort_adv]
adv_sorted_result = adv_result[argsort_adv]
adv_y = adv_sorted_result.cumsum() / adv_sorted_result.shape[0]
adv_df = pd.DataFrame({"correct": adv_y, "char_percent": adv_x})
adv_df["Dataset"] = "IR Adversarial"
adv_df["Guessing_Model"] = f" {name}"
frames.append(adv_df)
if len(gameplay["advneural_correct_positions"]) > 0:
adv_correct_positions = gameplay[
"advneural_correct_positions"
]
adv_wrong_positions = gameplay["advneural_wrong_positions"]
adv_positions = adv_correct_positions + adv_wrong_positions
adv_positions = np.array(adv_positions)
adv_result = np.array(
len(adv_correct_positions) * [1]
+ len(adv_wrong_positions) * [0]
)
argsort_adv = np.argsort(adv_positions)
adv_x = adv_positions[argsort_adv]
adv_sorted_result = adv_result[argsort_adv]
adv_y = (
adv_sorted_result.cumsum() / adv_sorted_result.shape[0]
)
adv_df = pd.DataFrame(
{"correct": adv_y, "char_percent": adv_x}
)
adv_df["Dataset"] = "RNN Adversarial"
adv_df["Guessing_Model"] = f" {name}"
frames.append(adv_df)
human_df = pd.concat(frames)
human_vals = sort_humans(list(human_df["Guessing_Model"].unique()))
human_dtype = CategoricalDtype(human_vals, ordered=True)
human_df["Guessing_Model"] = human_df["Guessing_Model"].astype(
human_dtype
)
dataset_dtype = CategoricalDtype(
["Regular Test", "IR Adversarial", "RNN Adversarial"],
ordered=True,
)
human_df["Dataset"] = human_df["Dataset"].astype(dataset_dtype)
if no_models:
p = ggplot(human_df) + geom_point(shape=".")
else:
df = self.char_plot_df
if 1 not in self.rounds:
df = df[df["Dataset"] != "Round 1 - IR Adversarial"]
if 2 not in self.rounds:
df = df[df["Dataset"] != "Round 2 - IR Adversarial"]
df = df[df["Dataset"] != "Round 2 - RNN Adversarial"]
p = ggplot(df)
if self.save_df is not None:
eprint(f"Saving df to: {self.save_df}")
df.to_json(self.save_df)
if (
os.path.exists("data/external/all_human_gameplay.json")
and not self.no_humans
):
eprint("Loading human data")
p = p + geom_line(data=human_df)
if columns:
facet_conf = facet_wrap("Guessing_Model", ncol=1)
else:
facet_conf = facet_wrap("Guessing_Model", nrow=1)
if not no_models:
if self.mvg_avg_char:
chart = stat_smooth(
method="mavg", se=False, method_args={"window": 400}
)
else:
chart = stat_summary_bin(
fun_data=mean_no_se,
bins=20,
shape=".",
linetype="None",
size=0.5,
)
else:
chart = None
p = p + facet_conf + aes(x="char_percent", y="correct", color="Dataset")
if chart is not None:
p += chart
p = (
p
+ scale_y_continuous(breaks=np.linspace(0, 1, 6))
+ scale_x_continuous(breaks=[0, 0.5, 1])
+ coord_cartesian(ylim=limits)
+ xlab("Percent of Question Revealed")
+ ylab("Accuracy")
+ theme(
# legend_position='top', legend_box_margin=0, legend_title=element_blank(),
strip_text_x=element_text(margin={"t": 6, "b": 6, "l": 1, "r": 5})
)
+ scale_color_manual(
values=["#FF3333", "#66CC00", "#3333FF", "#FFFF33"],
name="Questions",
)
)
if self.title != "":
p += ggtitle(self.title)
return p
else:
if self.save_df is not None:
eprint(f"Saving df to: {self.save_df}")
                self.char_plot_df.to_json(self.save_df)
return (
ggplot(self.char_plot_df)
+ aes(x="char_percent", y="correct", color="Guessing_Model")
+ stat_smooth(method="mavg", se=False, method_args={"window": 500})
+ scale_y_continuous(breaks=np.linspace(0, 1, 6))
+ coord_cartesian(ylim=limits)
)
def plot_compare_accuracy(self, expo=False):
if expo:
return (
ggplot(self.acc_df)
+ facet_wrap("position")
+ aes(x="guesser", y="accuracy", fill="Dataset")
+ geom_bar(stat="identity", position="dodge")
+ xlab("Guessing Model")
+ ylab("Accuracy")
)
else:
return (
ggplot(self.acc_df)
+ facet_wrap("position")
+ aes(x="guesser", y="accuracy")
+ geom_bar(stat="identity")
)
def save_all_plots(output_dir, report: GuesserReport, expo=False):
if not expo:
save_plot(
output_dir,
report.guesser_name,
"n_train_vs_accuracy.pdf",
report.plot_n_train_vs_accuracy(),
)
save_plot(
output_dir,
report.guesser_name,
"char_percent_vs_accuracy_histogram.pdf",
report.plot_char_percent_vs_accuracy_histogram(category=False),
)
if not expo:
save_plot(
output_dir,
report.guesser_name,
"char_percent_vs_accuracy_histogram_category.pdf",
report.plot_char_percent_vs_accuracy_histogram(category=True),
)
save_plot(
output_dir,
report.guesser_name,
"char_percent_vs_accuracy_smooth.pdf",
report.plot_char_percent_vs_accuracy_smooth(category=False),
)
if not expo:
save_plot(
output_dir,
report.guesser_name,
"char_percent_vs_accuracy_smooth_category.pdf",
report.plot_char_percent_vs_accuracy_smooth(category=True),
)
@main.command()
@click.option("--use-test", is_flag=True, default=False)
@click.option("--only-tacl", is_flag=True, default=False)
@click.option("--no-models", is_flag=True, default=False)
@click.option("--no-humans", is_flag=True, default=False)
@click.option("--columns", is_flag=True, default=False)
@click.option("--no-expo", is_flag=True, default=False)
@click.option("--mvg-avg-char", is_flag=True, default=False)
@click.option("--exclude-zero-train", is_flag=True, default=False)
@click.option("--merge-humans", is_flag=True, default=False)
@click.option("--rounds", default="1,2")
@click.option("--title", default="")
@click.option("--y-max", default=None)
@click.option("--save-df", default=None, type=str)
@click.argument("output_dir")
def guesser(
use_test,
only_tacl,
no_models,
no_humans,
columns,
no_expo,
mvg_avg_char,
exclude_zero_train,
merge_humans,
rounds,
title,
y_max,
save_df,
output_dir,
):
if use_test:
REPORT_PATTERN = TEST_REPORT_PATTERN
report_fold = "guesstest"
else:
REPORT_PATTERN = DEV_REPORT_PATTERN
report_fold = "guessdev"
dev_reports = []
for path in glob.glob(REPORT_PATTERN):
if only_tacl and "VWGuesser" in path:
continue
with open(path, "rb") as f:
report = GuesserReport(pickle.load(f), report_fold)
dev_reports.append(report)
if not only_tacl:
save_all_plots(output_dir, report)
if not no_expo:
expo_reports = []
expo_output_dir = safe_path(os.path.join(output_dir, "expo"))
for path in glob.glob(EXPO_REPORT_PATTERN):
if only_tacl and "VWGuesser" in path:
continue
with open(path, "rb") as f:
report = GuesserReport(pickle.load(f), "expo")
expo_reports.append(report)
if not only_tacl:
save_all_plots(expo_output_dir, report, expo=True)
if not only_tacl:
compare_report = CompareGuesserReport(dev_reports, rounds=rounds, title=title)
save_plot(
output_dir,
"compare",
"position_accuracy.pdf",
compare_report.plot_compare_accuracy(),
)
save_plot(
output_dir,
"compare",
"char_accuracy.pdf",
compare_report.plot_char_percent_vs_accuracy_smooth(),
)
eprint(f"N Expo Reports {len(expo_reports)}")
if not no_expo and (len(expo_reports) > 0 or no_models):
compare_report = CompareGuesserReport(
dev_reports + expo_reports,
mvg_avg_char=mvg_avg_char,
exclude_zero_train=exclude_zero_train,
merge_humans=merge_humans,
no_humans=no_humans,
rounds=rounds,
title=title,
y_max=y_max,
save_df=save_df,
)
save_plot(
output_dir,
"compare",
"expo_position_accuracy.pdf",
compare_report.plot_compare_accuracy(expo=True),
)
if columns:
height = 6.0
width = 1.7
else:
height = 1.7
width = 7.0
save_plot(
output_dir,
"compare",
"expo_char_accuracy.pdf",
compare_report.plot_char_percent_vs_accuracy_smooth(
expo=True, no_models=no_models, columns=columns
),
height=height,
width=width,
)
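# Illustrative CLI invocation for the command above (a sketch only: the module
# file name "figures.py" and the output directory are assumptions, not taken
# from this file):
#
#   python figures.py guesser --use-test --rounds 1,2 --title "Guesser comparison" output/guesser
#
# With --use-test the pickled reports matching TEST_REPORT_PATTERN are loaded,
# per-guesser plots are written via save_all_plots, and the comparison PDFs are
# saved under the given output directory.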
if __name__ == "__main__":
main()
| mit |
RPGOne/scikit-learn | sklearn/utils/tests/test_extmath.py | 3 | 24619 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import np_version
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.utils.extmath import stable_cumsum
from sklearn.exceptions import ConvergenceWarning
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'LU', 'QR']: # 'none' would not be stable
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer,
random_state=0)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the
# real rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer,
random_state=0)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
for dtype in (np.float32, np.float64):
if dtype is np.float32:
precision = 4
else:
precision = 5
X = X.astype(dtype)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X), precision)
Xcsr = sparse.csr_matrix(X, dtype=dtype)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr), precision)
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with approximate effective rank `rank` and an
    # important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.1,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate
# method without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer,
random_state=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.01)
# compute the singular values of X using the fast approximate
# method with iterated power method
_, sap, _ = randomized_svd(X, k,
power_iteration_normalizer=normalizer,
random_state=0)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method
# with iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5,
power_iteration_normalizer=normalizer)
# the iterated power method is still managing to get most of the
# structure at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_randomized_svd_power_iteration_normalizer():
# randomized_svd with power_iteration_normalized='none' diverges for
# large number of power iterations on this dataset
rng = np.random.RandomState(42)
X = make_low_rank_matrix(100, 500, effective_rank=50, random_state=rng)
X += 3 * rng.randint(0, 2, size=X.shape)
n_components = 50
# Check that it diverges with many (non-normalized) power iterations
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
U, s, V = randomized_svd(X, n_components, n_iter=20,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_20 = linalg.norm(A, ord='fro')
assert_greater(np.abs(error_2 - error_20), 100)
for normalizer in ['LU', 'QR', 'auto']:
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
for i in [5, 10, 50]:
U, s, V = randomized_svd(X, n_components, n_iter=i,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(V))
error = linalg.norm(A, ord='fro')
assert_greater(15, np.abs(error_2 - error))
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_randomized_svd_sign_flip_with_transpose():
# Check if the randomized_svd sign flipping is always done based on u
# irrespective of transpose.
# See https://github.com/scikit-learn/scikit-learn/issues/5608
# for more details.
def max_loading_is_positive(u, v):
"""
returns bool tuple indicating if the values maximising np.abs
are positive across all rows for u and across all columns for v.
"""
u_based = (np.abs(u).max(axis=0) == u.max(axis=0)).all()
v_based = (np.abs(v).max(axis=1) == v.max(axis=1)).all()
return u_based, v_based
mat = np.arange(10 * 8).reshape(10, -1)
# Without transpose
u_flipped, _, v_flipped = randomized_svd(mat, 3, flip_sign=True)
u_based, v_based = max_loading_is_positive(u_flipped, v_flipped)
assert_true(u_based)
assert_false(v_based)
# With transpose
u_flipped_with_transpose, _, v_flipped_with_transpose = randomized_svd(
mat, 3, flip_sign=True, transpose=True)
u_based, v_based = max_loading_is_positive(
u_flipped_with_transpose, v_flipped_with_transpose)
assert_true(u_based)
assert_false(v_based)
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
def naive_log_logistic(x):
return np.log(1 / (1 + np.exp(-x)))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
# ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
# ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
# ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
# min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = \
_incremental_mean_and_var(X2, old_means, old_variances,
old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
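# For reference, a sketch of the pairwise combination rule (Chan et al. /
# Youngs & Cramer) that _incremental_mean_and_var is based on; this is
# illustrative pseudocode, not the exact scikit-learn implementation:
#
#   n_ab    = n_a + n_b
#   delta   = mean_b - mean_a
#   mean_ab = mean_a + delta * n_b / n_ab
#   M2_ab   = var_a * n_a + var_b * n_b + delta ** 2 * n_a * n_b / n_ab
#   var_ab  = M2_ab / n_ab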
@skip_if_32bit
def test_incremental_variance_numerical_stability():
# Test Youngs and Cramer incremental variance formulas.
def np_var(A):
return A.var(axis=0)
# Naive one pass variance computation - not numerically stable
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
def one_pass_var(X):
n = X.shape[0]
exp_x2 = (X ** 2).sum(axis=0) / n
expx_2 = (X.sum(axis=0) / n) ** 2
return exp_x2 - expx_2
# Two-pass algorithm, stable.
# We use it as a benchmark. It is not an online algorithm
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
def two_pass_var(X):
mean = X.mean(axis=0)
Y = X.copy()
return np.mean((Y - mean)**2, axis=0)
# Naive online implementation
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
    # This works only for chunks of size 1
def naive_mean_variance_update(x, last_mean, last_variance,
last_sample_count):
updated_sample_count = (last_sample_count + 1)
samples_ratio = last_sample_count / float(updated_sample_count)
updated_mean = x / updated_sample_count + last_mean * samples_ratio
updated_variance = last_variance * samples_ratio + \
(x - last_mean) * (x - updated_mean) / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
    # We want to show a case where one_pass_var has error > 1e-3 while
    # _incremental_mean_and_var has less.
tol = 200
n_features = 2
n_samples = 10000
x1 = np.array(1e8, dtype=np.float64)
x2 = np.log(1e-5, dtype=np.float64)
A0 = x1 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A1 = x2 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A = np.vstack((A0, A1))
# Older versions of numpy have different precision
# In some old version, np.var is not stable
if np.abs(np_var(A) - two_pass_var(A)).max() < 1e-6:
stable_var = np_var
else:
stable_var = two_pass_var
# Naive one pass var: >tol (=1063)
assert_greater(np.abs(stable_var(A) - one_pass_var(A)).max(), tol)
# Starting point for online algorithms: after A0
# Naive implementation: >tol (436)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
naive_mean_variance_update(A1[i, :], mean, var, n)
assert_equal(n, A.shape[0])
# the mean is also slightly unstable
assert_greater(np.abs(A.mean(axis=0) - mean).max(), 1e-6)
assert_greater(np.abs(stable_var(A) - var).max(), tol)
# Robust implementation: <tol (177)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
_incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])),
mean, var, n)
assert_equal(n, A.shape[0])
assert_array_almost_equal(A.mean(axis=0), mean)
assert_greater(tol, np.abs(stable_var(A) - var).max())
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _incremental_mean_and_var(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
def test_stable_cumsum():
if np_version < (1, 9):
raise SkipTest("Sum is as unstable as cumsum for numpy < 1.9")
assert_array_equal(stable_cumsum([1, 2, 3]), np.cumsum([1, 2, 3]))
r = np.random.RandomState(0).rand(100000)
assert_warns(ConvergenceWarning, stable_cumsum, r, rtol=0, atol=0)
# test axis parameter
A = np.random.RandomState(36).randint(1000, size=(5, 5, 5))
assert_array_equal(stable_cumsum(A, axis=0), np.cumsum(A, axis=0))
assert_array_equal(stable_cumsum(A, axis=1), np.cumsum(A, axis=1))
assert_array_equal(stable_cumsum(A, axis=2), np.cumsum(A, axis=2))
| bsd-3-clause |
robintw/scikit-image | skimage/io/tests/test_mpl_imshow.py | 12 | 2852 | from __future__ import division
import numpy as np
from skimage import io
from skimage._shared._warnings import expected_warnings
import matplotlib.pyplot as plt
def setup():
io.reset_plugins()
# test images. Note that they don't have their full range for their dtype,
# but we still expect the display range to equal the full dtype range.
im8 = np.array([[0, 64], [128, 240]], np.uint8)
im16 = im8.astype(np.uint16) * 256
im64 = im8.astype(np.uint64)
imf = im8 / 255
im_lo = imf / 1000
im_hi = imf + 10
def n_subplots(ax_im):
"""Return the number of subplots in the figure containing an ``AxesImage``.
Parameters
----------
ax_im : matplotlib.pyplot.AxesImage object
The input ``AxesImage``.
Returns
-------
n : int
The number of subplots in the corresponding figure.
Notes
-----
This function is intended to check whether a colorbar was drawn, in
which case two subplots are expected. For standard imshows, one
subplot is expected.
"""
return len(ax_im.get_figure().get_axes())
def test_uint8():
plt.figure()
ax_im = io.imshow(im8)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 255)
assert n_subplots(ax_im) == 1
assert ax_im.colorbar is None
def test_uint16():
plt.figure()
ax_im = io.imshow(im16)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 65535)
assert n_subplots(ax_im) == 1
assert ax_im.colorbar is None
def test_float():
plt.figure()
ax_im = io.imshow(imf)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 1)
assert n_subplots(ax_im) == 1
assert ax_im.colorbar is None
def test_low_dynamic_range():
with expected_warnings(["Low image dynamic range"]):
ax_im = io.imshow(im_lo)
assert ax_im.get_clim() == (im_lo.min(), im_lo.max())
# check that a colorbar was created
assert ax_im.colorbar is not None
def test_outside_standard_range():
plt.figure()
with expected_warnings(["out of standard range"]):
ax_im = io.imshow(im_hi)
assert ax_im.get_clim() == (im_hi.min(), im_hi.max())
assert n_subplots(ax_im) == 2
assert ax_im.colorbar is not None
def test_nonstandard_type():
plt.figure()
with expected_warnings(["Non-standard image type",
"Low image dynamic range"]):
ax_im = io.imshow(im64)
assert ax_im.get_clim() == (im64.min(), im64.max())
assert n_subplots(ax_im) == 2
assert ax_im.colorbar is not None
def test_signed_image():
plt.figure()
im_signed = np.array([[-0.5, -0.2], [0.1, 0.4]])
ax_im = io.imshow(im_signed)
assert ax_im.get_clim() == (-0.5, 0.5)
assert n_subplots(ax_im) == 2
assert ax_im.colorbar is not None
if __name__ == '__main__':
np.testing.run_module_suite()
| bsd-3-clause |
shenzebang/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/pyRiemann-0.2.2/examples/motor-imagery/single_subject.py | 1 | 3861 | #generic import
import numpy as np
import sys
#mne import
from mne import Epochs, pick_types
from mne.io import concatenate_raws
from mne.io.edf import read_raw_edf
from mne.datasets import eegbci
from mne.event import find_events
from mne.decoding import CSP
#pyriemann import
from pyriemann.classification import MDM,FgMDM
from pyriemann.tangentspace import TangentSpace
from pyriemann.estimation import covariances
#sklearn imports
from sklearn.cross_validation import cross_val_score, KFold
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.lda import LDA
###############################################################################
## Set parameters and read data
# avoid classification of evoked responses by using epochs that start 1s after
# cue onset.
tmin, tmax = 1., 2.
event_id = dict(hands=2, feet=3)
subject = 7
runs = [6, 10, 14] # motor imagery: hands vs feet
raw_files = [read_raw_edf(f, preload=True) for f in eegbci.load_data(subject, runs)]
raw = concatenate_raws(raw_files)
picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
#subsample elecs
picks = picks[::2]
# Apply band-pass filter
raw.filter(7., 35., method='iir', picks=picks)
events = find_events(raw, shortest_event=0, stim_channel='STI 014')
# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
                baseline=None, preload=True, add_eeg_ref=False, verbose=False)
labels = epochs.events[:, -1] - 2
# cross validation
cv = KFold(len(labels), 10, shuffle=True, random_state=42)
# get epochs
epochs_data_train = epochs.get_data()
# compute covariance matrices
cov_data_train = covariances(epochs_data_train)
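# Note: `covariances` returns one spatial covariance matrix per trial, i.e. an
# array of shape (n_trials, n_channels, n_channels). MDM below then assigns each
# trial to the class whose Riemannian mean covariance is closest. A minimal
# stand-alone sketch of that idea (hold-out split instead of the KFold used
# below; the `metric` keyword is an assumption about the pyRiemann API):
#
#   mdm_demo = MDM(metric='riemann')
#   mdm_demo.fit(cov_data_train[::2], labels[::2])
#   print("hold-out accuracy: %f" % mdm_demo.score(cov_data_train[1::2], labels[1::2]))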
###############################################################################
# Classification with Minimum distance to mean
mdm = MDM()
# Use scikit-learn Pipeline with cross_val_score function
scores = cross_val_score(mdm, cov_data_train, labels, cv=cv, n_jobs=1)
# Printing the results
class_balance = np.mean(labels == labels[0])
class_balance = max(class_balance, 1. - class_balance)
print("MDM Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
class_balance))
###############################################################################
# Classification with Tangent Space Logistic Regression
ts = TangentSpace(metric='riemann')
lr = LogisticRegression(penalty='l2')
clf = Pipeline([('TS', ts), ('LR', lr)])
# Use scikit-learn Pipeline with cross_val_score function
scores = cross_val_score(clf, cov_data_train, labels, cv=cv, n_jobs=1)
# Printing the results
class_balance = np.mean(labels == labels[0])
class_balance = max(class_balance, 1. - class_balance)
print("TS + LR Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
class_balance))
###############################################################################
# Classification with CSP + linear discriminant analysis
# Assemble a classifier
lda = LDA()
csp = CSP(n_components=4, reg='lws', log=True)
clf = Pipeline([('CSP', csp), ('LDA', lda)])
scores = cross_val_score(clf, epochs_data_train, labels, cv=cv, n_jobs=1)
# Printing the results
class_balance = np.mean(labels == labels[0])
class_balance = max(class_balance, 1. - class_balance)
print("CSP + LDA Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
class_balance))
| bsd-3-clause |
petosegan/scikit-learn | sklearn/linear_model/setup.py | 169 | 1567 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linear_model', parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension('cd_fast', sources=['cd_fast.c'],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]), **blas_info)
config.add_extension('sgd_fast',
sources=['sgd_fast.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
# add other directories
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
kshedstrom/pyroms | examples/Arctic_HYCOM/get_hycom_GLBa0.08_temp_2014.py | 2 | 4746 | import matplotlib
matplotlib.use('Agg')
import numpy as np
import netCDF4
from datetime import datetime
import pyroms
import pyroms_toolbox
import sys
def create_HYCOM_file(name, time, lon, lat, z, var):
print 'Write with file %s' %name
#create netCDF file
nc = netCDF4.Dataset(name, 'w', format='NETCDF3_64BIT')
nc.Author = sys._getframe().f_code.co_name
nc.Created = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
nc.title = 'HYCOM + NCODA Global 1/12 Analysis (GLBa0.08)'
#create dimensions
Mp, Lp = lon.shape
N = len(z)
nc.createDimension('lon', Lp)
nc.createDimension('lat', Mp)
nc.createDimension('z', N)
nc.createDimension('ocean_time', None)
#create variables
nc.createVariable('lon', 'f', ('lat', 'lon'))
nc.variables['lon'].long_name = 'longitude'
nc.variables['lon'].units = 'degrees_east'
nc.variables['lon'][:] = lon
nc.createVariable('lat', 'f', ('lat', 'lon'))
nc.variables['lat'].long_name = 'latitude'
nc.variables['lat'].units = 'degrees_north'
nc.variables['lat'][:] = lat
nc.createVariable('z', 'f', ('z'))
nc.variables['z'].long_name = 'depth'
nc.variables['z'].units = 'meter'
nc.variables['z'][:] = z
nc.createVariable('ocean_time', 'f', ('ocean_time'))
nc.variables['ocean_time'].units = 'days since 1900-01-01 00:00:00'
nc.variables['ocean_time'].calendar = 'LEAP'
nc.variables['ocean_time'][0] = time
nc.createVariable(outvarname, 'f', ('ocean_time', 'z', 'lat', 'lon'), fill_value=spval)
nc.variables[outvarname].long_name = long_name
nc.variables[outvarname].units = units
nc.variables[outvarname].coordinates = 'lon lat'
nc.variables[outvarname][0] = var
nc.close()
print 'Done with file %s' %name
# get HYCOM GLBa0.08 global analysis data (Arctic subset) for 2014
year = 2014
retry='True'
invarname = 'temperature'
outvarname = 'temp'
#read grid and variable attributes from the first file
url='http://tds.hycom.org/thredds/dodsC/datasets/GLBa0.08/expt_90.6/2009/temp/archv.2009_001_00_3zt.nc'
dataset = netCDF4.Dataset(url)
lon = dataset.variables['Longitude'][2100:,550:4040]
lat = dataset.variables['Latitude'][2100:,550:4040]
z = dataset.variables['Depth'][:]
#spval = dataset.variables[invarname]._FillValue
units = dataset.variables[invarname].units
long_name = dataset.variables[invarname].long_name
dataset.close()
retry_day = []
# loop over daily files
if year%4 == 0:
daysinyear = 366
else:
daysinyear = 365
daysinyear = 94
for day in range(1,daysinyear+1):
#for day in range(95,daysinyear+1):
print 'Processing file for %s, day %03d, year %04d' %(invarname, day, year)
url='http://tds.hycom.org/thredds/dodsC/datasets/GLBa0.08/expt_91.0/2014/temp/archv.%04d_%03d_00_3zt.nc' %(year,day)
# url='http://tds.hycom.org/thredds/dodsC/datasets/GLBa0.08/expt_91.1/2014/temp/archv.%04d_%03d_00_3zt.nc' %(year,day)
#get data from server
try:
dataset = netCDF4.Dataset(url)
var = dataset.variables[invarname][0,:,2100:,550:4040]
spval = var.get_fill_value()
dataset.close()
print 'Got %s from server...' %invarname
except:
print 'No file on the server... We skip this day.'
retry_day.append(day)
continue
#create netCDF file
outfile = 'data/HYCOM_GLBa0.08_%s_%04d_%03d.nc' %(outvarname,year,day)
jday = pyroms_toolbox.date2jday(datetime(year, 1, 1)) + day - 1
create_HYCOM_file(outfile, jday, lon, lat, z, var)
if retry == 'True':
if len(retry_day) != 0:
print "Some file have not been downloded... Let's try again"
while len(retry_day) != 0:
for day in retry_day:
print 'Retry file for %s, day %03d, year %04d' %(invarname, day, year)
url='http://tds.hycom.org/thredds/dodsC/datasets/GLBa0.08/expt_91.0/2014/temp/archv.%04d_%03d_00_3zt.nc' %(year,day)
# url='http://tds.hycom.org/thredds/dodsC/datasets/GLBa0.08/expt_91.1/2014/temp/archv.%04d_%03d_00_3zt.nc' %(year,day)
#get data from server
try:
dataset = netCDF4.Dataset(url)
var = dataset.variables[invarname][0,:,2100:,550:4040]
spval = var.get_fill_value()
dataset.close()
print 'Got %s from server...' %invarname
except:
print 'No file on the server... We skip this day.'
continue
#create netCDF file
outfile = 'data/HYCOM_GLBa0.08_%s_%04d_%03d.nc' %(outvarname,year,day)
jday = pyroms_toolbox.date2jday(datetime(year, 1, 1)) + day - 1
create_HYCOM_file(outfile, jday, lon, lat, z, var)
retry_day.remove(day)
| bsd-3-clause |
RTHMaK/RPGOne | Documents/skflow-master/skflow/estimators/rnn.py | 2 | 10455 | """Recurrent Neural Network estimators."""
# Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import
from sklearn.base import ClassifierMixin, RegressorMixin
from skflow.estimators.base import TensorFlowEstimator
from skflow import models
def null_input_op_fn(X):
"""This function does no transformation on the inputs, used as default"""
return X
class TensorFlowRNNClassifier(TensorFlowEstimator, ClassifierMixin):
"""TensorFlow RNN Classifier model.
Parameters:
rnn_size: The size for rnn cell, e.g. size of your word embeddings.
cell_type: The type of rnn cell, including rnn, gru, and lstm.
num_layers: The number of layers of the rnn model.
input_op_fn: Function that will transform the input tensor, such as
creating word embeddings, byte list, etc. This takes
an argument X for input and returns transformed X.
bidirectional: boolean, Whether this is a bidirectional rnn.
sequence_length: If sequence_length is provided, dynamic calculation is performed.
This saves computational time when unrolling past max sequence length.
initial_state: An initial state for the RNN. This must be a tensor of appropriate type
and shape [batch_size x cell.state_size].
n_classes: Number of classes in the target.
tf_master: TensorFlow master. Empty string is default for local.
batch_size: Mini batch size.
steps: Number of steps to run over data.
optimizer: Optimizer name (or class), for example "SGD", "Adam",
"Adagrad".
learning_rate: If this is constant float value, no decay function is used.
Instead, a customized decay function can be passed that accepts
global_step as parameter and returns a Tensor.
e.g. exponential decay function:
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step,
decay_steps=2, decay_rate=0.001)
class_weight: None or list of n_classes floats. Weight associated with
            classes for loss computation. If not given, all classes are supposed to have
weight one.
tf_random_seed: Random seed for TensorFlow initializers.
Setting this value, allows consistency between reruns.
continue_training: when continue_training is True, once initialized
            model will be continually trained on every call of fit.
num_cores: Number of cores to be used. (default: 4)
max_to_keep: The maximum number of recent checkpoint files to keep.
As new files are created, older files are deleted.
If None or 0, all checkpoint files are kept.
Defaults to 5 (that is, the 5 most recent checkpoint files are kept.)
keep_checkpoint_every_n_hours: Number of hours between each checkpoint
to be saved. The default value of 10,000 hours effectively disables the feature.
"""
def __init__(self, rnn_size, n_classes, cell_type='gru', num_layers=1,
input_op_fn=null_input_op_fn,
initial_state=None, bidirectional=False,
sequence_length=None, tf_master="", batch_size=32,
steps=50, optimizer="SGD", learning_rate=0.1,
class_weight=None,
tf_random_seed=42, continue_training=False,
config_addon=None, verbose=1,
max_to_keep=5, keep_checkpoint_every_n_hours=10000):
self.rnn_size = rnn_size
self.cell_type = cell_type
self.input_op_fn = input_op_fn
self.bidirectional = bidirectional
self.num_layers = num_layers
self.sequence_length = sequence_length
self.initial_state = initial_state
super(TensorFlowRNNClassifier, self).__init__(
model_fn=self._model_fn,
n_classes=n_classes, tf_master=tf_master,
batch_size=batch_size, steps=steps, optimizer=optimizer,
learning_rate=learning_rate, class_weight=class_weight,
tf_random_seed=tf_random_seed,
continue_training=continue_training, config_addon=config_addon,
verbose=verbose,
max_to_keep=max_to_keep,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
def _model_fn(self, X, y):
return models.get_rnn_model(self.rnn_size, self.cell_type,
self.num_layers,
self.input_op_fn, self.bidirectional,
models.logistic_regression,
self.sequence_length,
self.initial_state)(X, y)
@property
def bias_(self):
"""Returns bias of the rnn layer."""
return self.get_tensor_value('logistic_regression/bias:0')
@property
def weights_(self):
"""Returns weights of the rnn layer."""
return self.get_tensor_value('logistic_regression/weights:0')
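# Illustrative usage sketch (not part of the library; adapted from the typical
# skflow text-classification pattern, so the ops, placeholder constants
# (n_words, max_document_length) and data below are assumptions):
#
#   import skflow
#
#   def rnn_input_op_fn(X):
#       # Embed integer word ids and split the sequence into a list of steps.
#       word_vectors = skflow.ops.categorical_variable(
#           X, n_classes=n_words, embedding_size=50, name='words')
#       return skflow.ops.split_squeeze(1, max_document_length, word_vectors)
#
#   classifier = skflow.TensorFlowRNNClassifier(
#       rnn_size=50, n_classes=2, cell_type='gru', input_op_fn=rnn_input_op_fn,
#       num_layers=1, steps=500, optimizer='Adam', learning_rate=0.01)
#   classifier.fit(X_train, y_train)
#   y_pred = classifier.predict(X_test)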
class TensorFlowRNNRegressor(TensorFlowEstimator, RegressorMixin):
"""TensorFlow RNN Regressor model.
Parameters:
rnn_size: The size for rnn cell, e.g. size of your word embeddings.
cell_type: The type of rnn cell, including rnn, gru, and lstm.
num_layers: The number of layers of the rnn model.
input_op_fn: Function that will transform the input tensor, such as
creating word embeddings, byte list, etc. This takes
an argument X for input and returns transformed X.
bidirectional: boolean, Whether this is a bidirectional rnn.
sequence_length: If sequence_length is provided, dynamic calculation is performed.
This saves computational time when unrolling past max sequence length.
initial_state: An initial state for the RNN. This must be a tensor of appropriate type
and shape [batch_size x cell.state_size].
tf_master: TensorFlow master. Empty string is default for local.
batch_size: Mini batch size.
steps: Number of steps to run over data.
optimizer: Optimizer name (or class), for example "SGD", "Adam",
"Adagrad".
learning_rate: If this is constant float value, no decay function is used.
Instead, a customized decay function can be passed that accepts
global_step as parameter and returns a Tensor.
e.g. exponential decay function:
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step,
decay_steps=2, decay_rate=0.001)
tf_random_seed: Random seed for TensorFlow initializers.
Setting this value, allows consistency between reruns.
continue_training: when continue_training is True, once initialized
            model will be continually trained on every call of fit.
num_cores: Number of cores to be used. (default: 4)
verbose: Controls the verbosity, possible values:
0: the algorithm and debug information is muted.
1: trainer prints the progress.
2: log device placement is printed.
max_to_keep: The maximum number of recent checkpoint files to keep.
As new files are created, older files are deleted.
If None or 0, all checkpoint files are kept.
Defaults to 5 (that is, the 5 most recent checkpoint files are kept.)
keep_checkpoint_every_n_hours: Number of hours between each checkpoint
to be saved. The default value of 10,000 hours effectively disables the feature.
"""
def __init__(self, rnn_size, cell_type='gru', num_layers=1,
input_op_fn=null_input_op_fn, initial_state=None,
bidirectional=False, sequence_length=None,
n_classes=0, tf_master="", batch_size=32,
steps=50, optimizer="SGD", learning_rate=0.1,
tf_random_seed=42, continue_training=False,
config_addon=None, verbose=1,
max_to_keep=5, keep_checkpoint_every_n_hours=10000):
self.rnn_size = rnn_size
self.cell_type = cell_type
self.input_op_fn = input_op_fn
self.bidirectional = bidirectional
self.num_layers = num_layers
self.sequence_length = sequence_length
self.initial_state = initial_state
super(TensorFlowRNNRegressor, self).__init__(
model_fn=self._model_fn,
n_classes=n_classes, tf_master=tf_master,
batch_size=batch_size, steps=steps, optimizer=optimizer,
learning_rate=learning_rate, tf_random_seed=tf_random_seed,
continue_training=continue_training, config_addon=config_addon,
verbose=verbose, max_to_keep=max_to_keep,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
def _model_fn(self, X, y):
return models.get_rnn_model(self.rnn_size, self.cell_type,
self.num_layers,
self.input_op_fn, self.bidirectional,
models.linear_regression,
self.sequence_length,
self.initial_state)(X, y)
@property
def bias_(self):
"""Returns bias of the rnn layer."""
return self.get_tensor_value('linear_regression/bias:0')
@property
def weights_(self):
"""Returns weights of the rnn layer."""
return self.get_tensor_value('linear_regression/weights:0')
| apache-2.0 |
tacaswell/bokeh | bokeh/charts/builder/histogram_builder.py | 43 | 9142 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Histogram class, which lets you build your histograms by simply
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
try:
import scipy.special
_is_scipy = True
except ImportError as e:
_is_scipy = False
import numpy as np
from ..utils import chunk, cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, GlyphRenderer, Range1d
from ...models.glyphs import Line, Quad
from ...properties import Bool, Float, Int
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Histogram(values, bins, mu=None, sigma=None, density=True, **kws):
""" Create a histogram chart using :class:`HistogramBuilder <bokeh.charts.builder.histogram_builder.HistogramBuilder>`
to render the geometry from values, bins, sigma and density.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
bins (int): number of bins to use in the Histogram building.
mu (float, optional): theoretical mean value for the normal
distribution. (default: None)
sigma (float, optional): theoretical sigma value for the
normal distribution. (default: None)
density (bool, optional): If False, the result will contain
the number of samples in each bin. If True, the result
is the value of the probability *density* function at
the bin, normalized such that the *integral* over the
range is 1. For more info check numpy.histogram
function documentation. (default: True)
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
import pandas as pd
from bokeh.charts import Histogram, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = pd.DataFrame(dict(normal=[1, 2, 3, 1], lognormal=[5, 4, 4, 1]))
hm = Histogram(xyvalues, bins=5, title='Histogram')
output_file('histogram.html')
show(hm)
"""
return create_and_build(
HistogramBuilder, values, bins=bins, mu=mu, sigma=sigma, density=density,
**kws
)
class HistogramBuilder(Builder):
"""This is the Histogram class and it is in charge of plotting
histograms in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed glyphs (quads and lines) taking the
references from the source.
"""
bins = Int(10, help="""
Number of bins to use for the histogram. (default: 10)
""")
mu = Float(help="""
Theoretical mean value for the normal distribution. (default: None)
""")
sigma = Float(help="""
Theoretical standard deviation value for the normal distribution.
(default: None)
""")
density = Bool(True, help="""
Whether to normalize the histogram. (default: True)
If True, the result is the value of the probability *density* function
at the bin, normalized such that the *integral* over the range is 1. If
False, the result will contain the number of samples in each bin.
For more info check ``numpy.histogram`` function documentation.
""")
def _process_data(self):
"""Take the Histogram data from the input **value.
It calculates the chart properties accordingly. Then build a dict
containing references to all the calculated points to be used by
the quad and line glyphs inside the ``_yield_renderers`` method.
"""
        # list to save all the groups available in the incoming input
self._groups.extend(self._values.keys())
# fill the data dictionary with the proper values
for i, (val, values) in enumerate(self._values.items()):
self.set_and_get("", val, values)
            # build the histogram using the configured number of bins
hist, edges = np.histogram(
np.array(values), density=self.density, bins=self.bins
)
self.set_and_get("hist", val, hist)
self.set_and_get("edges", val, edges)
self.set_and_get("left", val, edges[:-1])
self.set_and_get("right", val, edges[1:])
self.set_and_get("bottom", val, np.zeros(len(hist)))
self._mu_and_sigma = False
if self.mu is not None and self.sigma is not None:
if _is_scipy:
self._mu_and_sigma = True
self.set_and_get("x", val, np.linspace(-2, 2, len(self._data[val])))
den = 2 * self.sigma ** 2
x_val = self._data["x" + val]
x_val_mu = x_val - self.mu
sigsqr2pi = self.sigma * np.sqrt(2 * np.pi)
pdf = 1 / (sigsqr2pi) * np.exp(-x_val_mu ** 2 / den)
self.set_and_get("pdf", val, pdf)
self._groups.append("pdf")
cdf = (1 + scipy.special.erf(x_val_mu / np.sqrt(den))) / 2
self.set_and_get("cdf", val, cdf)
self._groups.append("cdf")
else:
print("You need scipy to get the theoretical probability distributions.")
def _set_sources(self):
"""Push the Histogram data into the ColumnDataSource and calculate
the proper ranges."""
self._source = ColumnDataSource(data=self._data)
if not self._mu_and_sigma:
x_names, y_names = self._attr[2::6], self._attr[1::6]
else:
x_names, y_names = self._attr[2::9], self._attr[1::9]
endx = max(max(self._data[i]) for i in x_names)
startx = min(min(self._data[i]) for i in x_names)
self.x_range = Range1d(start=startx - 0.1 * (endx - startx),
end=endx + 0.1 * (endx - startx))
endy = max(max(self._data[i]) for i in y_names)
self.y_range = Range1d(start=0, end=1.1 * endy)
def _yield_renderers(self):
"""Use the several glyphs to display the Histogram and pdf/cdf.
It uses the quad (and line) glyphs to display the Histogram
bars, taking as reference points the data loaded at the
        ColumnDataSource.
"""
if not self._mu_and_sigma:
sextets = list(chunk(self._attr, 6))
colors = cycle_colors(sextets, self.palette)
# TODO (bev) this is a perfect use for a namedtuple
# sextet: values, his, edges, left, right, bottom
for i, sextet in enumerate(sextets):
glyph = Quad(
top=sextet[1], bottom=sextet[5], left=sextet[3], right=sextet[4],
fill_color=colors[i], fill_alpha=0.7,
line_color="white", line_alpha=1.0
)
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i], [renderer]))
yield renderer
else:
nonets = list(chunk(self._attr, 9))
colors = cycle_colors(nonets, self.palette)
# TODO (bev) this is a perfect use for a namedtuple
# nonet: values, his, edges, left, right, bottom, x, pdf, cdf
for i, nonet in enumerate(nonets):
glyph = Quad(
top=nonet[1], bottom=nonet[5], left=nonet[3], right=nonet[4],
fill_color=colors[i], fill_alpha=0.7,
line_color="white", line_alpha=1.0
)
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i], [renderer]))
yield renderer
glyph = Line(x=nonet[6], y=nonet[7], line_color="black")
yield GlyphRenderer(data_source=self._source, glyph=glyph)
glyph = Line(x=nonet[6], y=nonet[8], line_color="blue")
yield GlyphRenderer(data_source=self._source, glyph=glyph)
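# Illustrative note (not part of the module): when both `mu` and `sigma` are
# supplied and scipy is available, the builder overlays the theoretical normal
# pdf (black line) and cdf (blue line) on top of the histogram quads, e.g.:
#
#   import numpy as np
#   from bokeh.charts import Histogram, output_file, show
#   data = dict(normal=np.random.normal(0, 1, 1000))  # sample data (assumption)
#   hm = Histogram(data, bins=50, mu=0.0, sigma=1.0, title='Histogram + pdf/cdf')
#   output_file('histogram_pdf_cdf.html')
#   show(hm)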
| bsd-3-clause |
shuangshuangwang/spark | python/pyspark/sql/functions.py | 1 | 154771 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A collection of builtin functions
"""
import sys
import functools
import warnings
from pyspark import since, SparkContext
from pyspark.rdd import PythonEvalType
from pyspark.sql.column import Column, _to_java_column, _to_seq, _create_column_from_literal
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import StringType, DataType
# Keep UserDefinedFunction import for backwards compatible import; moved in SPARK-22409
from pyspark.sql.udf import UserDefinedFunction, _create_udf # noqa: F401
from pyspark.sql.udf import _create_udf
# Keep pandas_udf and PandasUDFType import for backwards compatible import; moved in SPARK-28264
from pyspark.sql.pandas.functions import pandas_udf, PandasUDFType # noqa: F401
from pyspark.sql.utils import to_str
# Note to developers: all of the PySpark functions here take strings as column names
# whenever possible. Namely, if columns are referred to as arguments, they can always
# be either Column or string, even though there might be a few exceptions for legacy
# or inevitable reasons. If you are fixing other language APIs together, also note
# that the Scala side does not do this, since it would require writing out every
# single overridden definition.
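# For example (illustrative only; `df` and its columns are assumptions), the
# following calls are equivalent because a column name is resolved to a Column:
#
#   df.select(max("age"), avg(df.height))
#   df.select(max(col("age")), avg(col("height")))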
def _get_get_jvm_function(name, sc):
"""
Retrieves JVM function identified by name from
Java gateway associated with sc.
"""
return getattr(sc._jvm.functions, name)
def _invoke_function(name, *args):
"""
Invokes JVM function identified by name with args
and wraps the result with :class:`Column`.
"""
jf = _get_get_jvm_function(name, SparkContext._active_spark_context)
return Column(jf(*args))
def _invoke_function_over_column(name, col):
"""
Invokes unary JVM function identified by name
and wraps the result with :class:`Column`.
"""
return _invoke_function(name, _to_java_column(col))
def _invoke_binary_math_function(name, col1, col2):
"""
Invokes binary JVM math function identified by name
and wraps the result with :class:`Column`.
"""
return _invoke_function(
name,
# For legacy reasons, the arguments here can be implicitly converted into floats,
# if they are not columns or strings.
_to_java_column(col1) if isinstance(col1, (str, Column)) else float(col1),
_to_java_column(col2) if isinstance(col2, (str, Column)) else float(col2)
)
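# Sketch of how a binary math function is typically built on the helper above
# (illustrative; mirrors the way functions such as atan2 are defined further
# down in this module):
#
#   def atan2(col1, col2):
#       return _invoke_binary_math_function("atan2", col1, col2)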
def _options_to_str(options):
return {key: to_str(value) for (key, value) in options.items()}
def lit(col):
"""
Creates a :class:`Column` of literal value.
.. versionadded:: 1.3.0
Examples
--------
>>> df.select(lit(5).alias('height')).withColumn('spark_user', lit(True)).take(1)
[Row(height=5, spark_user=True)]
"""
return col if isinstance(col, Column) else _invoke_function("lit", col)
@since(1.3)
def col(col):
    """
    Returns a :class:`Column` based on the given column name.
    """
    return _invoke_function("col", col)
@since(1.3)
def column(col):
    """
    Returns a :class:`Column` based on the given column name.
    Alias of :func:`col`.
    """
    return _invoke_function("col", col)
@since(1.3)
def asc(col):
"""
Returns a sort expression based on the ascending order of the given column name.
"""
return (
col.asc() if isinstance(col, Column)
else _invoke_function("asc", col)
)
@since(1.3)
def desc(col):
"""
Returns a sort expression based on the descending order of the given column name.
"""
return (
col.desc() if isinstance(col, Column)
else _invoke_function("desc", col)
)
@since(1.3)
def sqrt(col):
"""
Computes the square root of the specified float value.
"""
return _invoke_function_over_column("sqrt", col)
@since(1.3)
def abs(col):
"""
Computes the absolute value.
"""
return _invoke_function_over_column("abs", col)
@since(1.3)
def max(col):
"""
Aggregate function: returns the maximum value of the expression in a group.
"""
return _invoke_function_over_column("max", col)
@since(1.3)
def min(col):
"""
Aggregate function: returns the minimum value of the expression in a group.
"""
return _invoke_function_over_column("min", col)
@since(1.3)
def count(col):
"""
Aggregate function: returns the number of items in a group.
"""
return _invoke_function_over_column("count", col)
@since(1.3)
def sum(col):
"""
Aggregate function: returns the sum of all values in the expression.
"""
return _invoke_function_over_column("sum", col)
@since(1.3)
def avg(col):
"""
Aggregate function: returns the average of the values in a group.
"""
return _invoke_function_over_column("avg", col)
@since(1.3)
def mean(col):
"""
Aggregate function: returns the average of the values in a group.
"""
return _invoke_function_over_column("mean", col)
@since(1.3)
def sumDistinct(col):
"""
Aggregate function: returns the sum of distinct values in the expression.
"""
return _invoke_function_over_column("sumDistinct", col)
def acos(col):
"""
.. versionadded:: 1.4.0
Returns
-------
:class:`Column`
inverse cosine of `col`, as if computed by `java.lang.Math.acos()`
"""
return _invoke_function_over_column("acos", col)
def asin(col):
"""
.. versionadded:: 1.3.0
Returns
-------
:class:`Column`
inverse sine of `col`, as if computed by `java.lang.Math.asin()`
"""
return _invoke_function_over_column("asin", col)
def atan(col):
"""
.. versionadded:: 1.4.0
Returns
-------
:class:`Column`
inverse tangent of `col`, as if computed by `java.lang.Math.atan()`
"""
return _invoke_function_over_column("atan", col)
@since(1.4)
def cbrt(col):
"""
Computes the cube-root of the given value.
"""
return _invoke_function_over_column("cbrt", col)
@since(1.4)
def ceil(col):
"""
Computes the ceiling of the given value.
"""
return _invoke_function_over_column("ceil", col)
def cos(col):
"""
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`Column` or str
angle in radians
Returns
-------
:class:`Column`
cosine of the angle, as if computed by `java.lang.Math.cos()`.
"""
return _invoke_function_over_column("cos", col)
def cosh(col):
"""
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`Column` or str
hyperbolic angle
Returns
-------
:class:`Column`
hyperbolic cosine of the angle, as if computed by `java.lang.Math.cosh()`
"""
return _invoke_function_over_column("cosh", col)
@since(1.4)
def exp(col):
"""
Computes the exponential of the given value.
"""
return _invoke_function_over_column("exp", col)
@since(1.4)
def expm1(col):
"""
Computes the exponential of the given value minus one.
"""
return _invoke_function_over_column("expm1", col)
@since(1.4)
def floor(col):
"""
Computes the floor of the given value.
"""
return _invoke_function_over_column("floor", col)
@since(1.4)
def log(col):
"""
Computes the natural logarithm of the given value.
"""
return _invoke_function_over_column("log", col)
@since(1.4)
def log10(col):
"""
Computes the logarithm of the given value in Base 10.
"""
return _invoke_function_over_column("log10", col)
@since(1.4)
def log1p(col):
"""
Computes the natural logarithm of the given value plus one.
"""
return _invoke_function_over_column("log1p", col)
@since(1.4)
def rint(col):
"""
Returns the double value that is closest in value to the argument and
is equal to a mathematical integer.
"""
return _invoke_function_over_column("rint", col)
@since(1.4)
def signum(col):
"""
Computes the signum of the given value.
"""
return _invoke_function_over_column("signum", col)
def sin(col):
"""
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`Column` or str
Returns
-------
:class:`Column`
sine of the angle, as if computed by `java.lang.Math.sin()`
"""
return _invoke_function_over_column("sin", col)
def sinh(col):
"""
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`Column` or str
hyperbolic angle
Returns
-------
:class:`Column`
hyperbolic sine of the given value,
as if computed by `java.lang.Math.sinh()`
"""
return _invoke_function_over_column("sinh", col)
def tan(col):
"""
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`Column` or str
angle in radians
Returns
-------
:class:`Column`
tangent of the given value, as if computed by `java.lang.Math.tan()`
"""
return _invoke_function_over_column("tan", col)
def tanh(col):
"""
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`Column` or str
hyperbolic angle
Returns
-------
:class:`Column`
hyperbolic tangent of the given value
as if computed by `java.lang.Math.tanh()`
"""
return _invoke_function_over_column("tanh", col)
@since(1.4)
def toDegrees(col):
"""
.. deprecated:: 2.1.0
Use :func:`degrees` instead.
"""
warnings.warn("Deprecated in 2.1, use degrees instead.", DeprecationWarning)
return degrees(col)
@since(1.4)
def toRadians(col):
"""
.. deprecated:: 2.1.0
Use :func:`radians` instead.
"""
warnings.warn("Deprecated in 2.1, use radians instead.", DeprecationWarning)
return radians(col)
@since(1.4)
def bitwiseNOT(col):
"""
Computes bitwise not.
"""
return _invoke_function_over_column("bitwiseNOT", col)
@since(2.4)
def asc_nulls_first(col):
"""
Returns a sort expression based on the ascending order of the given
column name, and null values return before non-null values.
"""
return (
col.asc_nulls_first() if isinstance(col, Column)
else _invoke_function("asc_nulls_first", col)
)
@since(2.4)
def asc_nulls_last(col):
"""
Returns a sort expression based on the ascending order of the given
column name, and null values appear after non-null values.
"""
return (
col.asc_nulls_last() if isinstance(col, Column)
else _invoke_function("asc_nulls_last", col)
)
@since(2.4)
def desc_nulls_first(col):
"""
Returns a sort expression based on the descending order of the given
column name, and null values appear before non-null values.
"""
return (
col.desc_nulls_first() if isinstance(col, Column)
else _invoke_function("desc_nulls_first", col)
)
@since(2.4)
def desc_nulls_last(col):
"""
Returns a sort expression based on the descending order of the given
column name, and null values appear after non-null values.
"""
return (
col.desc_nulls_last() if isinstance(col, Column)
else _invoke_function("desc_nulls_last", col)
)
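# Usage sketch for the null-aware sort expressions above (illustrative only):
# assumes an active SparkSession bound to `spark`; the example data is made up.
#
#   >>> people = spark.createDataFrame(
#   ...     [("Alice", 2), ("Bob", None)], ["name", "age"])
#   >>> people.orderBy(desc_nulls_last("age")).show()   # the null age sorts last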
@since(1.6)
def stddev(col):
"""
Aggregate function: alias for stddev_samp.
"""
return _invoke_function_over_column("stddev", col)
@since(1.6)
def stddev_samp(col):
"""
Aggregate function: returns the unbiased sample standard deviation of
the expression in a group.
"""
return _invoke_function_over_column("stddev_samp", col)
@since(1.6)
def stddev_pop(col):
"""
Aggregate function: returns population standard deviation of
the expression in a group.
"""
return _invoke_function_over_column("stddev_pop", col)
@since(1.6)
def variance(col):
"""
Aggregate function: alias for var_samp
"""
return _invoke_function_over_column("variance", col)
@since(1.6)
def var_samp(col):
"""
Aggregate function: returns the unbiased sample variance of
the values in a group.
"""
return _invoke_function_over_column("var_samp", col)
@since(1.6)
def var_pop(col):
"""
Aggregate function: returns the population variance of the values in a group.
"""
return _invoke_function_over_column("var_pop", col)
@since(1.6)
def skewness(col):
"""
Aggregate function: returns the skewness of the values in a group.
"""
return _invoke_function_over_column("skewness", col)
@since(1.6)
def kurtosis(col):
"""
Aggregate function: returns the kurtosis of the values in a group.
"""
return _invoke_function_over_column("kurtosis", col)
def collect_list(col):
"""
Aggregate function: returns a list of objects with duplicates.
.. versionadded:: 1.6.0
Notes
-----
The function is non-deterministic because the order of collected results depends
on the order of the rows which may be non-deterministic after a shuffle.
Examples
--------
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_list('age')).collect()
[Row(collect_list(age)=[2, 5, 5])]
"""
return _invoke_function_over_column("collect_list", col)
def collect_set(col):
"""
Aggregate function: returns a set of objects with duplicate elements eliminated.
.. versionadded:: 1.6.0
Notes
-----
The function is non-deterministic because the order of collected results depends
on the order of the rows which may be non-deterministic after a shuffle.
Examples
--------
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_set('age')).collect()
[Row(collect_set(age)=[5, 2])]
"""
return _invoke_function_over_column("collect_set", col)
def degrees(col):
"""
Converts an angle measured in radians to an approximately equivalent angle
measured in degrees.
.. versionadded:: 2.1.0
Parameters
----------
col : :class:`Column` or str
angle in radians
Returns
-------
:class:`Column`
angle in degrees, as if computed by `java.lang.Math.toDegrees()`
"""
return _invoke_function_over_column("degrees", col)
def radians(col):
"""
Converts an angle measured in degrees to an approximately equivalent angle
measured in radians.
.. versionadded:: 2.1.0
Parameters
----------
col : :class:`Column` or str
angle in degrees
Returns
-------
:class:`Column`
angle in radians, as if computed by `java.lang.Math.toRadians()`
"""
return _invoke_function_over_column("radians", col)
def atan2(col1, col2):
"""
.. versionadded:: 1.4.0
Parameters
----------
col1 : str, :class:`Column` or float
coordinate on y-axis
col2 : str, :class:`Column` or float
coordinate on x-axis
Returns
-------
:class:`Column`
the `theta` component of the point
(`r`, `theta`)
in polar coordinates that corresponds to the point
(`x`, `y`) in Cartesian coordinates,
as if computed by `java.lang.Math.atan2()`
"""
return _invoke_binary_math_function("atan2", col1, col2)
@since(1.4)
def hypot(col1, col2):
"""
Computes ``sqrt(a^2 + b^2)`` without intermediate overflow or underflow.
"""
return _invoke_binary_math_function("hypot", col1, col2)
@since(1.4)
def pow(col1, col2):
"""
Returns the value of the first argument raised to the power of the second argument.
"""
return _invoke_binary_math_function("pow", col1, col2)
@since(1.6)
def row_number():
"""
Window function: returns a sequential number starting at 1 within a window partition.
"""
return _invoke_function("row_number")
@since(1.6)
def dense_rank():
"""
Window function: returns the rank of rows within a window partition, without any gaps.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
place and that the next person came in third. Rank would instead give sequential numbers, so
the person who came in third place (after the ties) would register as coming in fifth.
This is equivalent to the DENSE_RANK function in SQL.
"""
return _invoke_function("dense_rank")
@since(1.6)
def rank():
"""
Window function: returns the rank of rows within a window partition.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
place and that the next person came in third. Rank would instead give sequential numbers, so
the person who came in third place (after the ties) would register as coming in fifth.
This is equivalent to the RANK function in SQL.
"""
return _invoke_function("rank")
@since(1.6)
def cume_dist():
"""
Window function: returns the cumulative distribution of values within a window partition,
i.e. the fraction of rows that are below the current row.
"""
return _invoke_function("cume_dist")
@since(1.6)
def percent_rank():
"""
Window function: returns the relative rank (i.e. percentile) of rows within a window partition.
"""
return _invoke_function("percent_rank")
@since(1.3)
def approxCountDistinct(col, rsd=None):
"""
.. deprecated:: 2.1.0
Use :func:`approx_count_distinct` instead.
"""
warnings.warn("Deprecated in 2.1, use approx_count_distinct instead.", DeprecationWarning)
return approx_count_distinct(col, rsd)
def approx_count_distinct(col, rsd=None):
"""Aggregate function: returns a new :class:`Column` for approximate distinct count of
column `col`.
.. versionadded:: 2.1.0
Parameters
----------
col : :class:`Column` or str
rsd : float, optional
maximum relative standard deviation allowed (default = 0.05).
For rsd < 0.01, it is more efficient to use :func:`countDistinct`
Examples
--------
>>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect()
[Row(distinct_ages=2)]
"""
sc = SparkContext._active_spark_context
if rsd is None:
jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col))
else:
jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col), rsd)
return Column(jc)
@since(1.6)
def broadcast(df):
"""Marks a DataFrame as small enough for use in broadcast joins."""
sc = SparkContext._active_spark_context
return DataFrame(sc._jvm.functions.broadcast(df._jdf), df.sql_ctx)
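# Usage sketch for `broadcast` (illustrative only): assumes an active
# SparkSession bound to `spark`; the example data is made up. Wrapping the
# small side of a join hints the planner towards a broadcast hash join.
#
#   >>> lookup = spark.createDataFrame([(1, "x"), (2, "y")], ["id", "label"])
#   >>> big = spark.range(100)
#   >>> big.join(broadcast(lookup), "id").show()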
def coalesce(*cols):
"""Returns the first column that is not null.
.. versionadded:: 1.4.0
Examples
--------
>>> cDf = spark.createDataFrame([(None, None), (1, None), (None, 2)], ("a", "b"))
>>> cDf.show()
+----+----+
| a| b|
+----+----+
|null|null|
| 1|null|
|null| 2|
+----+----+
>>> cDf.select(coalesce(cDf["a"], cDf["b"])).show()
+--------------+
|coalesce(a, b)|
+--------------+
| null|
| 1|
| 2|
+--------------+
>>> cDf.select('*', coalesce(cDf["a"], lit(0.0))).show()
+----+----+----------------+
| a| b|coalesce(a, 0.0)|
+----+----+----------------+
|null|null| 0.0|
| 1|null| 1.0|
|null| 2| 0.0|
+----+----+----------------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.coalesce(_to_seq(sc, cols, _to_java_column))
return Column(jc)
def corr(col1, col2):
"""Returns a new :class:`Column` for the Pearson Correlation Coefficient for ``col1``
and ``col2``.
.. versionadded:: 1.6.0
Examples
--------
>>> a = range(20)
>>> b = [2 * x for x in range(20)]
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(corr("a", "b").alias('c')).collect()
[Row(c=1.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.corr(_to_java_column(col1), _to_java_column(col2)))
def covar_pop(col1, col2):
"""Returns a new :class:`Column` for the population covariance of ``col1`` and ``col2``.
.. versionadded:: 2.0.0
Examples
--------
>>> a = [1] * 10
>>> b = [1] * 10
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(covar_pop("a", "b").alias('c')).collect()
[Row(c=0.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.covar_pop(_to_java_column(col1), _to_java_column(col2)))
def covar_samp(col1, col2):
"""Returns a new :class:`Column` for the sample covariance of ``col1`` and ``col2``.
.. versionadded:: 2.0.0
Examples
--------
>>> a = [1] * 10
>>> b = [1] * 10
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(covar_samp("a", "b").alias('c')).collect()
[Row(c=0.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.covar_samp(_to_java_column(col1), _to_java_column(col2)))
def countDistinct(col, *cols):
"""Returns a new :class:`Column` for distinct count of ``col`` or ``cols``.
.. versionadded:: 1.3.0
Examples
--------
>>> df.agg(countDistinct(df.age, df.name).alias('c')).collect()
[Row(c=2)]
>>> df.agg(countDistinct("age", "name").alias('c')).collect()
[Row(c=2)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.countDistinct(_to_java_column(col), _to_seq(sc, cols, _to_java_column))
return Column(jc)
def first(col, ignorenulls=False):
"""Aggregate function: returns the first value in a group.
The function by default returns the first values it sees. It will return the first non-null
value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
.. versionadded:: 1.3.0
Notes
-----
The function is non-deterministic because its result depends on the order of the
rows which may be non-deterministic after a shuffle.
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.first(_to_java_column(col), ignorenulls)
return Column(jc)
def grouping(col):
"""
Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated
or not, returns 1 for aggregated or 0 for not aggregated in the result set.
.. versionadded:: 2.0.0
Examples
--------
>>> df.cube("name").agg(grouping("name"), sum("age")).orderBy("name").show()
+-----+--------------+--------+
| name|grouping(name)|sum(age)|
+-----+--------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+--------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping(_to_java_column(col))
return Column(jc)
def grouping_id(*cols):
"""
Aggregate function: returns the level of grouping, equals to
(grouping(c1) << (n-1)) + (grouping(c2) << (n-2)) + ... + grouping(cn)
.. versionadded:: 2.0.0
Notes
-----
The list of columns should match with grouping columns exactly, or empty (means all
the grouping columns).
Examples
--------
>>> df.cube("name").agg(grouping_id(), sum("age")).orderBy("name").show()
+-----+-------------+--------+
| name|grouping_id()|sum(age)|
+-----+-------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+-------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping_id(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.6)
def input_file_name():
"""Creates a string column for the file name of the current Spark task.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.input_file_name())
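# Usage sketch for `input_file_name` (illustrative only): assumes an active
# SparkSession bound to `spark`; the directory below is hypothetical.
#
#   >>> logs = spark.read.text("/tmp/logs")             # hypothetical path
#   >>> logs.select(input_file_name().alias("source"), "value").show(truncate=False)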
def isnan(col):
"""An expression that returns true iff the column is NaN.
.. versionadded:: 1.6.0
Examples
--------
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(isnan("a").alias("r1"), isnan(df.a).alias("r2")).collect()
[Row(r1=False, r2=False), Row(r1=True, r2=True)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.isnan(_to_java_column(col)))
def isnull(col):
"""An expression that returns true iff the column is null.
.. versionadded:: 1.6.0
Examples
--------
>>> df = spark.createDataFrame([(1, None), (None, 2)], ("a", "b"))
>>> df.select(isnull("a").alias("r1"), isnull(df.a).alias("r2")).collect()
[Row(r1=False, r2=False), Row(r1=True, r2=True)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.isnull(_to_java_column(col)))
def last(col, ignorenulls=False):
"""Aggregate function: returns the last value in a group.
The function by default returns the last values it sees. It will return the last non-null
value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
.. versionadded:: 1.3.0
Notes
-----
The function is non-deterministic because its result depends on the order of the
rows which may be non-deterministic after a shuffle.
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.last(_to_java_column(col), ignorenulls)
return Column(jc)
def monotonically_increasing_id():
"""A column that generates monotonically increasing 64-bit integers.
The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive.
The current implementation puts the partition ID in the upper 31 bits, and the record number
within each partition in the lower 33 bits. The assumption is that the data frame has
less than 1 billion partitions, and each partition has less than 8 billion records.
.. versionadded:: 1.6.0
Notes
-----
The function is non-deterministic because its result depends on partition IDs.
As an example, consider a :class:`DataFrame` with two partitions, each with 3 records.
This expression would return the following IDs:
0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594.
>>> df0 = sc.parallelize(range(2), 2).mapPartitions(lambda x: [(1,), (2,), (3,)]).toDF(['col1'])
>>> df0.select(monotonically_increasing_id().alias('id')).collect()
[Row(id=0), Row(id=1), Row(id=2), Row(id=8589934592), Row(id=8589934593), Row(id=8589934594)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.monotonically_increasing_id())
def nanvl(col1, col2):
"""Returns col1 if it is not NaN, or col2 if col1 is NaN.
Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`).
.. versionadded:: 1.6.0
Examples
--------
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect()
[Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2)))
def percentile_approx(col, percentage, accuracy=10000):
"""Returns the approximate `percentile` of the numeric column `col` which is the smallest value
in the ordered `col` values (sorted from least to greatest) such that no more than `percentage`
of `col` values is less than the value or equal to that value.
The value of percentage must be between 0.0 and 1.0.
The accuracy parameter (default: 10000)
is a positive numeric literal which controls approximation accuracy at the cost of memory.
Higher value of accuracy yields better accuracy, 1.0/accuracy is the relative error
of the approximation.
When percentage is an array, each value of the percentage array must be between 0.0 and 1.0.
In this case, returns the approximate percentile array of column col
at the given percentage array.
.. versionadded:: 3.1.0
Examples
--------
>>> key = (col("id") % 3).alias("key")
>>> value = (randn(42) + key * 10).alias("value")
>>> df = spark.range(0, 1000, 1, 1).select(key, value)
>>> df.select(
... percentile_approx("value", [0.25, 0.5, 0.75], 1000000).alias("quantiles")
... ).printSchema()
root
|-- quantiles: array (nullable = true)
| |-- element: double (containsNull = false)
>>> df.groupBy("key").agg(
... percentile_approx("value", 0.5, lit(1000000)).alias("median")
... ).printSchema()
root
|-- key: long (nullable = true)
|-- median: double (nullable = true)
"""
sc = SparkContext._active_spark_context
if isinstance(percentage, (list, tuple)):
# A local list
percentage = sc._jvm.functions.array(_to_seq(sc, [
_create_column_from_literal(x) for x in percentage
]))
elif isinstance(percentage, Column):
# Already a Column
percentage = _to_java_column(percentage)
else:
# Probably scalar
percentage = _create_column_from_literal(percentage)
accuracy = (
_to_java_column(accuracy) if isinstance(accuracy, Column)
else _create_column_from_literal(accuracy)
)
return Column(sc._jvm.functions.percentile_approx(_to_java_column(col), percentage, accuracy))
def rand(seed=None):
"""Generates a random column with independent and identically distributed (i.i.d.) samples
uniformly distributed in [0.0, 1.0).
.. versionadded:: 1.4.0
Notes
-----
The function is non-deterministic in general case.
Examples
--------
>>> df.withColumn('rand', rand(seed=42) * 3).collect()
[Row(age=2, name='Alice', rand=2.4052597283576684),
Row(age=5, name='Bob', rand=2.3913904055683974)]
"""
sc = SparkContext._active_spark_context
if seed is not None:
jc = sc._jvm.functions.rand(seed)
else:
jc = sc._jvm.functions.rand()
return Column(jc)
def randn(seed=None):
"""Generates a column with independent and identically distributed (i.i.d.) samples from
the standard normal distribution.
.. versionadded:: 1.4.0
Notes
-----
The function is non-deterministic in general case.
Examples
--------
>>> df.withColumn('randn', randn(seed=42)).collect()
[Row(age=2, name='Alice', randn=1.1027054481455365),
Row(age=5, name='Bob', randn=0.7400395449950132)]
"""
sc = SparkContext._active_spark_context
if seed is not None:
jc = sc._jvm.functions.randn(seed)
else:
jc = sc._jvm.functions.randn()
return Column(jc)
def round(col, scale=0):
"""
Round the given value to `scale` decimal places using HALF_UP rounding mode if `scale` >= 0
or at integral part when `scale` < 0.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([(2.5,)], ['a']).select(round('a', 0).alias('r')).collect()
[Row(r=3.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.round(_to_java_column(col), scale))
def bround(col, scale=0):
"""
Round the given value to `scale` decimal places using HALF_EVEN rounding mode if `scale` >= 0
or at integral part when `scale` < 0.
.. versionadded:: 2.0.0
Examples
--------
>>> spark.createDataFrame([(2.5,)], ['a']).select(bround('a', 0).alias('r')).collect()
[Row(r=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.bround(_to_java_column(col), scale))
def shiftLeft(col, numBits):
"""Shift the given value numBits left.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([(21,)], ['a']).select(shiftLeft('a', 1).alias('r')).collect()
[Row(r=42)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.shiftLeft(_to_java_column(col), numBits))
def shiftRight(col, numBits):
"""(Signed) shift the given value numBits right.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([(42,)], ['a']).select(shiftRight('a', 1).alias('r')).collect()
[Row(r=21)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRight(_to_java_column(col), numBits)
return Column(jc)
def shiftRightUnsigned(col, numBits):
"""Unsigned shift the given value numBits right.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([(-42,)], ['a'])
>>> df.select(shiftRightUnsigned('a', 1).alias('r')).collect()
[Row(r=9223372036854775787)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRightUnsigned(_to_java_column(col), numBits)
return Column(jc)
def spark_partition_id():
"""A column for partition ID.
.. versionadded:: 1.6.0
Notes
-----
This is non-deterministic because it depends on data partitioning and task scheduling.
Examples
--------
>>> df.repartition(1).select(spark_partition_id().alias("pid")).collect()
[Row(pid=0), Row(pid=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.spark_partition_id())
def expr(str):
"""Parses the expression string into the column that it represents
.. versionadded:: 1.5.0
Examples
--------
>>> df.select(expr("length(name)")).collect()
[Row(length(name)=5), Row(length(name)=3)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.expr(str))
def struct(*cols):
"""Creates a new struct column.
.. versionadded:: 1.4.0
Parameters
----------
cols : list, set, str or :class:`Column`
column names or :class:`Column`\\s to contain in the output struct.
Examples
--------
>>> df.select(struct('age', 'name').alias("struct")).collect()
[Row(struct=Row(age=2, name='Alice')), Row(struct=Row(age=5, name='Bob'))]
>>> df.select(struct([df.age, df.name]).alias("struct")).collect()
[Row(struct=Row(age=2, name='Alice')), Row(struct=Row(age=5, name='Bob'))]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.struct(_to_seq(sc, cols, _to_java_column))
return Column(jc)
def greatest(*cols):
"""
Returns the greatest value of the list of column names, skipping null values.
This function takes at least 2 parameters. It will return null iff all parameters are null.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
>>> df.select(greatest(df.a, df.b, df.c).alias("greatest")).collect()
[Row(greatest=4)]
"""
if len(cols) < 2:
raise ValueError("greatest should take at least two columns")
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.greatest(_to_seq(sc, cols, _to_java_column)))
def least(*cols):
"""
Returns the least value of the list of column names, skipping null values.
This function takes at least 2 parameters. It will return null iff all parameters are null.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
>>> df.select(least(df.a, df.b, df.c).alias("least")).collect()
[Row(least=1)]
"""
if len(cols) < 2:
raise ValueError("least should take at least two columns")
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.least(_to_seq(sc, cols, _to_java_column)))
def when(condition, value):
"""Evaluates a list of conditions and returns one of multiple possible result expressions.
If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
.. versionadded:: 1.4.0
Parameters
----------
condition : :class:`Column`
a boolean :class:`Column` expression.
value :
a literal value, or a :class:`Column` expression.
Examples
--------
>>> df.select(when(df['age'] == 2, 3).otherwise(4).alias("age")).collect()
[Row(age=3), Row(age=4)]
>>> df.select(when(df.age == 2, df.age + 1).alias("age")).collect()
[Row(age=3), Row(age=None)]
"""
sc = SparkContext._active_spark_context
if not isinstance(condition, Column):
raise TypeError("condition should be a Column")
v = value._jc if isinstance(value, Column) else value
jc = sc._jvm.functions.when(condition._jc, v)
return Column(jc)
def log(arg1, arg2=None):
"""Returns the first argument-based logarithm of the second argument.
If there is only one argument, then this takes the natural logarithm of the argument.
.. versionadded:: 1.5.0
Examples
--------
>>> df.select(log(10.0, df.age).alias('ten')).rdd.map(lambda l: str(l.ten)[:7]).collect()
['0.30102', '0.69897']
>>> df.select(log(df.age).alias('e')).rdd.map(lambda l: str(l.e)[:7]).collect()
['0.69314', '1.60943']
"""
sc = SparkContext._active_spark_context
if arg2 is None:
jc = sc._jvm.functions.log(_to_java_column(arg1))
else:
jc = sc._jvm.functions.log(arg1, _to_java_column(arg2))
return Column(jc)
def log2(col):
"""Returns the base-2 logarithm of the argument.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([(4,)], ['a']).select(log2('a').alias('log2')).collect()
[Row(log2=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.log2(_to_java_column(col)))
def conv(col, fromBase, toBase):
"""
Convert a number in a string column from one base to another.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([("010101",)], ['n'])
>>> df.select(conv(df.n, 2, 16).alias('hex')).collect()
[Row(hex='15')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.conv(_to_java_column(col), fromBase, toBase))
def factorial(col):
"""
Computes the factorial of the given value.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([(5,)], ['n'])
>>> df.select(factorial(df.n).alias('f')).collect()
[Row(f=120)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.factorial(_to_java_column(col)))
# --------------- Window functions ------------------------
def lag(col, offset=1, default=None):
"""
Window function: returns the value that is `offset` rows before the current row, and
`defaultValue` if there are fewer than `offset` rows before the current row. For example,
an `offset` of one will return the previous row at any given point in the window partition.
This is equivalent to the LAG function in SQL.
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
offset : int, optional
number of row to extend
default : optional
default value
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lag(_to_java_column(col), offset, default))
def lead(col, offset=1, default=None):
"""
Window function: returns the value that is `offset` rows after the current row, and
`defaultValue` if there are fewer than `offset` rows after the current row. For example,
an `offset` of one will return the next row at any given point in the window partition.
This is equivalent to the LEAD function in SQL.
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
offset : int, optional
number of row to extend
default : optional
default value
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lead(_to_java_column(col), offset, default))
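# Usage sketch for `lag`/`lead` (illustrative only): assumes an active
# SparkSession bound to `spark`; the example data is made up.
#
#   >>> from pyspark.sql.window import Window
#   >>> readings = spark.createDataFrame(
#   ...     [("s1", 1, 10), ("s1", 2, 12), ("s1", 3, 9)], ["sensor", "t", "value"])
#   >>> w = Window.partitionBy("sensor").orderBy("t")
#   >>> readings.select("t", "value",
#   ...                 lag("value", 1).over(w).alias("prev"),
#   ...                 lead("value", 1).over(w).alias("next")).show()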
def nth_value(col, offset, ignoreNulls=False):
"""
Window function: returns the value that is the `offset`\\th row of the window frame
(counting from 1), and `null` if the size of the window frame is less than `offset` rows.
It will return the `offset`\\th non-null value it sees when `ignoreNulls` is set to
true. If all values are null, then null is returned.
This is equivalent to the nth_value function in SQL.
.. versionadded:: 3.1.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
offset : int, optional
number of row to use as the value
ignoreNulls : bool, optional
indicates the Nth value should skip null in the
determination of which row to use
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.nth_value(_to_java_column(col), offset, ignoreNulls))
def ntile(n):
"""
Window function: returns the ntile group id (from 1 to `n` inclusive)
in an ordered window partition. For example, if `n` is 4, the first
quarter of the rows will get value 1, the second quarter will get 2,
the third quarter will get 3, and the last quarter will get 4.
This is equivalent to the NTILE function in SQL.
.. versionadded:: 1.4.0
Parameters
----------
n : int
an integer
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.ntile(int(n)))
# ---------------------- Date/Timestamp functions ------------------------------
@since(1.5)
def current_date():
"""
Returns the current date at the start of query evaluation as a :class:`DateType` column.
All calls of current_date within the same query return the same value.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.current_date())
def current_timestamp():
"""
Returns the current timestamp at the start of query evaluation as a :class:`TimestampType`
column. All calls of current_timestamp within the same query return the same value.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.current_timestamp())
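# Usage sketch for `current_date`/`current_timestamp` (illustrative only):
# assumes an active SparkSession bound to `spark`.
#
#   >>> spark.range(1).select(current_date().alias("today"),
#   ...                       current_timestamp().alias("now")).show(truncate=False)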
def date_format(date, format):
"""
Converts a date/timestamp/string to a value of string in the format specified by the date
format given by the second argument.
A pattern could be for instance `dd.MM.yyyy` and could return a string like '18.03.1993'. All
pattern letters of `datetime pattern`_ can be used.
.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
.. versionadded:: 1.5.0
Notes
-----
Whenever possible, use specialized functions like `year`.
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_format('dt', 'MM/dd/yyy').alias('date')).collect()
[Row(date='04/08/2015')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_format(_to_java_column(date), format))
def year(col):
"""
Extract the year of a given date as integer.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(year('dt').alias('year')).collect()
[Row(year=2015)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.year(_to_java_column(col)))
def quarter(col):
"""
Extract the quarter of a given date as integer.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(quarter('dt').alias('quarter')).collect()
[Row(quarter=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.quarter(_to_java_column(col)))
def month(col):
"""
Extract the month of a given date as integer.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(month('dt').alias('month')).collect()
[Row(month=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.month(_to_java_column(col)))
def dayofweek(col):
"""
Extract the day of the week of a given date as integer.
.. versionadded:: 2.3.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofweek('dt').alias('day')).collect()
[Row(day=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofweek(_to_java_column(col)))
def dayofmonth(col):
"""
Extract the day of the month of a given date as integer.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofmonth('dt').alias('day')).collect()
[Row(day=8)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofmonth(_to_java_column(col)))
def dayofyear(col):
"""
Extract the day of the year of a given date as integer.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofyear('dt').alias('day')).collect()
[Row(day=98)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofyear(_to_java_column(col)))
def hour(col):
"""
Extract the hours of a given date as integer.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(hour('ts').alias('hour')).collect()
[Row(hour=13)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.hour(_to_java_column(col)))
def minute(col):
"""
Extract the minutes of a given date as integer.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(minute('ts').alias('minute')).collect()
[Row(minute=8)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.minute(_to_java_column(col)))
def second(col):
"""
Extract the seconds of a given date as integer.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(second('ts').alias('second')).collect()
[Row(second=15)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.second(_to_java_column(col)))
def weekofyear(col):
"""
Extract the week number of a given date as integer.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(weekofyear(df.dt).alias('week')).collect()
[Row(week=15)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.weekofyear(_to_java_column(col)))
def date_add(start, days):
"""
Returns the date that is `days` days after `start`
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_add(df.dt, 1).alias('next_date')).collect()
[Row(next_date=datetime.date(2015, 4, 9))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_add(_to_java_column(start), days))
def date_sub(start, days):
"""
Returns the date that is `days` days before `start`
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_sub(df.dt, 1).alias('prev_date')).collect()
[Row(prev_date=datetime.date(2015, 4, 7))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_sub(_to_java_column(start), days))
def datediff(end, start):
"""
Returns the number of days from `start` to `end`.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08','2015-05-10')], ['d1', 'd2'])
>>> df.select(datediff(df.d2, df.d1).alias('diff')).collect()
[Row(diff=32)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.datediff(_to_java_column(end), _to_java_column(start)))
def add_months(start, months):
"""
Returns the date that is `months` months after `start`
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(add_months(df.dt, 1).alias('next_month')).collect()
[Row(next_month=datetime.date(2015, 5, 8))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.add_months(_to_java_column(start), months))
def months_between(date1, date2, roundOff=True):
"""
Returns number of months between dates date1 and date2.
If date1 is later than date2, then the result is positive.
If date1 and date2 are on the same day of month, or both are the last day of month,
returns an integer (time of day will be ignored).
The result is rounded off to 8 digits unless `roundOff` is set to `False`.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', '1996-10-30')], ['date1', 'date2'])
>>> df.select(months_between(df.date1, df.date2).alias('months')).collect()
[Row(months=3.94959677)]
>>> df.select(months_between(df.date1, df.date2, False).alias('months')).collect()
[Row(months=3.9495967741935485)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.months_between(
_to_java_column(date1), _to_java_column(date2), roundOff))
def to_date(col, format=None):
"""Converts a :class:`Column` into :class:`pyspark.sql.types.DateType`
using the optionally specified format. Specify formats according to `datetime pattern`_.
By default, it follows casting rules to :class:`pyspark.sql.types.DateType` if the format
is omitted. Equivalent to ``col.cast("date")``.
.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
.. versionadded:: 2.2.0
Examples
--------
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_date(df.t).alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_date(df.t, 'yyyy-MM-dd HH:mm:ss').alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
"""
sc = SparkContext._active_spark_context
if format is None:
jc = sc._jvm.functions.to_date(_to_java_column(col))
else:
jc = sc._jvm.functions.to_date(_to_java_column(col), format)
return Column(jc)
def to_timestamp(col, format=None):
"""Converts a :class:`Column` into :class:`pyspark.sql.types.TimestampType`
using the optionally specified format. Specify formats according to `datetime pattern`_.
By default, it follows casting rules to :class:`pyspark.sql.types.TimestampType` if the format
is omitted. Equivalent to ``col.cast("timestamp")``.
.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
.. versionadded:: 2.2.0
Examples
--------
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_timestamp(df.t).alias('dt')).collect()
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_timestamp(df.t, 'yyyy-MM-dd HH:mm:ss').alias('dt')).collect()
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
"""
sc = SparkContext._active_spark_context
if format is None:
jc = sc._jvm.functions.to_timestamp(_to_java_column(col))
else:
jc = sc._jvm.functions.to_timestamp(_to_java_column(col), format)
return Column(jc)
def trunc(date, format):
"""
Returns date truncated to the unit specified by the format.
.. versionadded:: 1.5.0
Parameters
----------
date : :class:`Column` or str
format : str
'year', 'yyyy', 'yy' or 'month', 'mon', 'mm'
Examples
--------
>>> df = spark.createDataFrame([('1997-02-28',)], ['d'])
>>> df.select(trunc(df.d, 'year').alias('year')).collect()
[Row(year=datetime.date(1997, 1, 1))]
>>> df.select(trunc(df.d, 'mon').alias('month')).collect()
[Row(month=datetime.date(1997, 2, 1))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.trunc(_to_java_column(date), format))
def date_trunc(format, timestamp):
"""
Returns timestamp truncated to the unit specified by the format.
.. versionadded:: 2.3.0
Parameters
----------
format : str
'year', 'yyyy', 'yy', 'month', 'mon', 'mm',
'day', 'dd', 'hour', 'minute', 'second', 'week', 'quarter'
timestamp : :class:`Column` or str
Examples
--------
>>> df = spark.createDataFrame([('1997-02-28 05:02:11',)], ['t'])
>>> df.select(date_trunc('year', df.t).alias('year')).collect()
[Row(year=datetime.datetime(1997, 1, 1, 0, 0))]
>>> df.select(date_trunc('mon', df.t).alias('month')).collect()
[Row(month=datetime.datetime(1997, 2, 1, 0, 0))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_trunc(format, _to_java_column(timestamp)))
def next_day(date, dayOfWeek):
"""
Returns the first date which is later than the value of the date column.
Day of the week parameter is case insensitive, and accepts:
"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun".
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-07-27',)], ['d'])
>>> df.select(next_day(df.d, 'Sun').alias('date')).collect()
[Row(date=datetime.date(2015, 8, 2))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.next_day(_to_java_column(date), dayOfWeek))
def last_day(date):
"""
Returns the last day of the month which the given date belongs to.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('1997-02-10',)], ['d'])
>>> df.select(last_day(df.d).alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.last_day(_to_java_column(date)))
def from_unixtime(timestamp, format="yyyy-MM-dd HH:mm:ss"):
"""
Converts the number of seconds from unix epoch (1970-01-01 00:00:00 UTC) to a string
representing the timestamp of that moment in the current system time zone in the given
format.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
>>> time_df = spark.createDataFrame([(1428476400,)], ['unix_time'])
>>> time_df.select(from_unixtime('unix_time').alias('ts')).collect()
[Row(ts='2015-04-08 00:00:00')]
>>> spark.conf.unset("spark.sql.session.timeZone")
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.from_unixtime(_to_java_column(timestamp), format))
def unix_timestamp(timestamp=None, format='yyyy-MM-dd HH:mm:ss'):
"""
Convert time string with given pattern ('yyyy-MM-dd HH:mm:ss', by default)
to Unix time stamp (in seconds), using the default timezone and the default
locale; returns null if the conversion fails.
if `timestamp` is None, then it returns current timestamp.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
>>> time_df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> time_df.select(unix_timestamp('dt', 'yyyy-MM-dd').alias('unix_time')).collect()
[Row(unix_time=1428476400)]
>>> spark.conf.unset("spark.sql.session.timeZone")
"""
sc = SparkContext._active_spark_context
if timestamp is None:
return Column(sc._jvm.functions.unix_timestamp())
return Column(sc._jvm.functions.unix_timestamp(_to_java_column(timestamp), format))
def from_utc_timestamp(timestamp, tz):
"""
This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in UTC, and
renders that timestamp as a timestamp in the given time zone.
However, timestamp in Spark represents the number of microseconds from the Unix epoch, which is
not timezone-agnostic. So in Spark this function just shifts the timestamp value from the UTC
time zone to the given time zone.
This function may return a confusing result if the input is a string with a timezone, e.g.
'2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp
according to the timezone in the string, and finally displays the result by converting the
timestamp to a string according to the session local timezone.
.. versionadded:: 1.5.0
Parameters
----------
timestamp : :class:`Column` or str
the column that contains timestamps
tz : :class:`Column` or str
A string detailing the time zone ID that the input should be adjusted to. It should
be in the format of either region-based zone IDs or zone offsets. Region IDs must
have the form 'area/city', such as 'America/Los_Angeles'. Zone offsets must be in
the format '(+|-)HH:mm', for example '-08:00' or '+01:00'. Also 'UTC' and 'Z' are
supported as aliases of '+00:00'. Other short names are not recommended to use
because they can be ambiguous.
.. versionchanged:: 2.4
`tz` can take a :class:`Column` containing timezone ID strings.
Examples
--------
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
>>> df.select(from_utc_timestamp(df.ts, "PST").alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 2, 30))]
>>> df.select(from_utc_timestamp(df.ts, df.tz).alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 19, 30))]
"""
sc = SparkContext._active_spark_context
if isinstance(tz, Column):
tz = _to_java_column(tz)
return Column(sc._jvm.functions.from_utc_timestamp(_to_java_column(timestamp), tz))
def to_utc_timestamp(timestamp, tz):
"""
This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in the given
timezone, and renders that timestamp as a timestamp in UTC.
However, timestamp in Spark represents the number of microseconds from the Unix epoch, which is
not timezone-agnostic. So in Spark this function just shifts the timestamp value from the given
time zone to the UTC time zone.
This function may return a confusing result if the input is a string with a timezone, e.g.
'2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp
according to the timezone in the string, and finally displays the result by converting the
timestamp to a string according to the session local timezone.
.. versionadded:: 1.5.0
Parameters
----------
timestamp : :class:`Column` or str
the column that contains timestamps
tz : :class:`Column` or str
A string detailing the time zone ID that the input should be adjusted to. It should
be in the format of either region-based zone IDs or zone offsets. Region IDs must
have the form 'area/city', such as 'America/Los_Angeles'. Zone offsets must be in
the format '(+|-)HH:mm', for example '-08:00' or '+01:00'. Also 'UTC' and 'Z' are
supported as aliases of '+00:00'. Other short names are not recommended to use
because they can be ambiguous.
.. versionchanged:: 2.4.0
`tz` can take a :class:`Column` containing timezone ID strings.
Examples
--------
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
>>> df.select(to_utc_timestamp(df.ts, "PST").alias('utc_time')).collect()
[Row(utc_time=datetime.datetime(1997, 2, 28, 18, 30))]
>>> df.select(to_utc_timestamp(df.ts, df.tz).alias('utc_time')).collect()
[Row(utc_time=datetime.datetime(1997, 2, 28, 1, 30))]
"""
sc = SparkContext._active_spark_context
if isinstance(tz, Column):
tz = _to_java_column(tz)
return Column(sc._jvm.functions.to_utc_timestamp(_to_java_column(timestamp), tz))
def timestamp_seconds(col):
"""
.. versionadded:: 3.1.0
Examples
--------
>>> from pyspark.sql.functions import timestamp_seconds
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
>>> time_df = spark.createDataFrame([(1230219000,)], ['unix_time'])
>>> time_df.select(timestamp_seconds(time_df.unix_time).alias('ts')).show()
+-------------------+
| ts|
+-------------------+
|2008-12-25 07:30:00|
+-------------------+
>>> spark.conf.unset("spark.sql.session.timeZone")
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.timestamp_seconds(_to_java_column(col)))
def window(timeColumn, windowDuration, slideDuration=None, startTime=None):
"""Bucketize rows into one or more time windows given a timestamp specifying column. Window
starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window
[12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in
the order of months are not supported.
The time column must be of :class:`pyspark.sql.types.TimestampType`.
Durations are provided as strings, e.g. '1 second', '1 day 12 hours', '2 minutes'. Valid
interval strings are 'week', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond'.
If the ``slideDuration`` is not provided, the windows will be tumbling windows.
The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start
window intervals. For example, in order to have hourly tumbling windows that start 15 minutes
past the hour, e.g. 12:15-13:15, 13:15-14:15... provide `startTime` as `15 minutes`.
The output column will be a struct called 'window' by default with the nested columns 'start'
and 'end', where 'start' and 'end' will be of :class:`pyspark.sql.types.TimestampType`.
.. versionadded:: 2.0.0
Examples
--------
>>> df = spark.createDataFrame([("2016-03-11 09:00:07", 1)]).toDF("date", "val")
>>> w = df.groupBy(window("date", "5 seconds")).agg(sum("val").alias("sum"))
>>> w.select(w.window.start.cast("string").alias("start"),
... w.window.end.cast("string").alias("end"), "sum").collect()
[Row(start='2016-03-11 09:00:05', end='2016-03-11 09:00:10', sum=1)]
"""
def check_string_field(field, fieldName):
if not field or type(field) is not str:
raise TypeError("%s should be provided as a string" % fieldName)
sc = SparkContext._active_spark_context
time_col = _to_java_column(timeColumn)
check_string_field(windowDuration, "windowDuration")
if slideDuration and startTime:
check_string_field(slideDuration, "slideDuration")
check_string_field(startTime, "startTime")
res = sc._jvm.functions.window(time_col, windowDuration, slideDuration, startTime)
elif slideDuration:
check_string_field(slideDuration, "slideDuration")
res = sc._jvm.functions.window(time_col, windowDuration, slideDuration)
elif startTime:
check_string_field(startTime, "startTime")
res = sc._jvm.functions.window(time_col, windowDuration, windowDuration, startTime)
else:
res = sc._jvm.functions.window(time_col, windowDuration)
return Column(res)
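# Usage sketch for a sliding (rather than tumbling) time window (illustrative
# only): assumes an active SparkSession bound to `spark`; the example data is
# made up and the timestamp column is cast explicitly.
#
#   >>> events = spark.createDataFrame(
#   ...     [("2016-03-11 09:00:07", 1), ("2016-03-11 09:00:12", 2)], ["ts", "val"])
#   >>> events = events.withColumn("ts", to_timestamp("ts"))
#   >>> (events.groupBy(window("ts", "10 seconds", "5 seconds"))
#   ...        .agg(sum("val").alias("total"))).show(truncate=False)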
# ---------------------------- misc functions ----------------------------------
def crc32(col):
"""
Calculates the cyclic redundancy check value (CRC32) of a binary column and
returns the value as a bigint.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([('ABC',)], ['a']).select(crc32('a').alias('crc32')).collect()
[Row(crc32=2743272264)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.crc32(_to_java_column(col)))
def md5(col):
"""Calculates the MD5 digest and returns the value as a 32 character hex string.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([('ABC',)], ['a']).select(md5('a').alias('hash')).collect()
[Row(hash='902fbdd2b1df0c4f70b4a5d23525e932')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.md5(_to_java_column(col))
return Column(jc)
def sha1(col):
"""Returns the hex string result of SHA-1.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([('ABC',)], ['a']).select(sha1('a').alias('hash')).collect()
[Row(hash='3c01bdbb26f358bab27f267924aa2c9a03fcfdb8')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.sha1(_to_java_column(col))
return Column(jc)
def sha2(col, numBits):
"""Returns the hex string result of SHA-2 family of hash functions (SHA-224, SHA-256, SHA-384,
and SHA-512). The numBits indicates the desired bit length of the result, which must have a
value of 224, 256, 384, 512, or 0 (which is equivalent to 256).
.. versionadded:: 1.5.0
Examples
--------
>>> digests = df.select(sha2(df.name, 256).alias('s')).collect()
>>> digests[0]
Row(s='3bc51062973c458d5a6f2d8d64a023246354ad7e064b1e4e009ec8a0699a3043')
>>> digests[1]
Row(s='cd9fb1e148ccd8442e5aa74904cc73bf6fb54d1d54d333bd596aa9bb4bb4e961')
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.sha2(_to_java_column(col), numBits)
return Column(jc)
def hash(*cols):
"""Calculates the hash code of given columns, and returns the result as an int column.
.. versionadded:: 2.0.0
Examples
--------
>>> spark.createDataFrame([('ABC',)], ['a']).select(hash('a').alias('hash')).collect()
[Row(hash=-757602832)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.hash(_to_seq(sc, cols, _to_java_column))
return Column(jc)
def xxhash64(*cols):
"""Calculates the hash code of given columns using the 64-bit variant of the xxHash algorithm,
and returns the result as a long column.
.. versionadded:: 3.0.0
Examples
--------
>>> spark.createDataFrame([('ABC',)], ['a']).select(xxhash64('a').alias('hash')).collect()
[Row(hash=4105715581806190027)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.xxhash64(_to_seq(sc, cols, _to_java_column))
return Column(jc)
def assert_true(col, errMsg=None):
"""
Returns null if the input column is true; throws an exception with the provided error message
otherwise.
.. versionadded:: 3.1.0
Examples
--------
>>> df = spark.createDataFrame([(0,1)], ['a', 'b'])
>>> df.select(assert_true(df.a < df.b).alias('r')).collect()
[Row(r=None)]
>>> df = spark.createDataFrame([(0,1)], ['a', 'b'])
>>> df.select(assert_true(df.a < df.b, df.a).alias('r')).collect()
[Row(r=None)]
>>> df = spark.createDataFrame([(0,1)], ['a', 'b'])
>>> df.select(assert_true(df.a < df.b, 'error').alias('r')).collect()
[Row(r=None)]
"""
sc = SparkContext._active_spark_context
if errMsg is None:
return Column(sc._jvm.functions.assert_true(_to_java_column(col)))
if not isinstance(errMsg, (str, Column)):
raise TypeError(
"errMsg should be a Column or a str, got {}".format(type(errMsg))
)
errMsg = (
_create_column_from_literal(errMsg)
if isinstance(errMsg, str)
else _to_java_column(errMsg)
)
return Column(sc._jvm.functions.assert_true(_to_java_column(col), errMsg))
@since(3.1)
def raise_error(errMsg):
"""
Throws an exception with the provided error message.
"""
if not isinstance(errMsg, (str, Column)):
raise TypeError(
"errMsg should be a Column or a str, got {}".format(type(errMsg))
)
sc = SparkContext._active_spark_context
errMsg = (
_create_column_from_literal(errMsg)
if isinstance(errMsg, str)
else _to_java_column(errMsg)
)
return Column(sc._jvm.functions.raise_error(errMsg))
# ---------------------- String/Binary functions ------------------------------
@since(1.5)
def upper(col):
"""
Converts a string expression to upper case.
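Examples
--------
A minimal illustrative doctest (assumes an active ``spark`` session, as in the
other examples in this module):
>>> spark.createDataFrame([('Spark',)], ['s']).select(upper('s').alias('u')).collect()
[Row(u='SPARK')]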
"""
return _invoke_function_over_column("upper", col)
@since(1.5)
def lower(col):
"""
Converts a string expression to lower case.
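Examples
--------
Illustrative example (assumes an active ``spark`` session):
>>> spark.createDataFrame([('Spark',)], ['s']).select(lower('s').alias('l')).collect()
[Row(l='spark')]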
"""
return _invoke_function_over_column("lower", col)
@since(1.5)
def ascii(col):
"""
Computes the numeric value of the first character of the string column.
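Examples
--------
Illustrative example (assumes an active ``spark`` session); 'S' has code point 83:
>>> spark.createDataFrame([('Spark',)], ['s']).select(ascii('s').alias('v')).collect()
[Row(v=83)]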
"""
return _invoke_function_over_column("ascii", col)
@since(1.5)
def base64(col):
"""
Computes the BASE64 encoding of a binary column and returns it as a string column.
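Examples
--------
Illustrative example (assumes an active ``spark`` session); a string column is
implicitly cast to binary before encoding:
>>> df = spark.createDataFrame([('Spark SQL',)], ['s'])
>>> df.select(base64('s').alias('b')).collect()
[Row(b='U3BhcmsgU1FM')]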
"""
return _invoke_function_over_column("base64", col)
@since(1.5)
def unbase64(col):
"""
Decodes a BASE64 encoded string column and returns it as a binary column.
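Examples
--------
Illustrative example (assumes an active ``spark`` session):
>>> df = spark.createDataFrame([('U3BhcmsgU1FM',)], ['s'])
>>> df.select(unbase64('s').alias('b')).collect()
[Row(b=bytearray(b'Spark SQL'))]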
"""
return _invoke_function_over_column("unbase64", col)
@since(1.5)
def ltrim(col):
"""
Trim the spaces from left end for the specified string value.
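Examples
--------
Illustrative example (assumes an active ``spark`` session):
>>> df = spark.createDataFrame([('   Spark',)], ['s'])
>>> df.select(ltrim('s').alias('r')).collect()
[Row(r='Spark')]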
"""
return _invoke_function_over_column("ltrim", col)
@since(1.5)
def rtrim(col):
"""
Trim the spaces from right end for the specified string value.
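Examples
--------
Illustrative example (assumes an active ``spark`` session):
>>> df = spark.createDataFrame([('Spark   ',)], ['s'])
>>> df.select(rtrim('s').alias('r')).collect()
[Row(r='Spark')]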
"""
return _invoke_function_over_column("rtrim", col)
@since(1.5)
def trim(col):
"""
Trim the spaces from both ends for the specified string column.
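Examples
--------
Illustrative example (assumes an active ``spark`` session):
>>> df = spark.createDataFrame([('   Spark   ',)], ['s'])
>>> df.select(trim('s').alias('r')).collect()
[Row(r='Spark')]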
"""
return _invoke_function_over_column("trim", col)
def concat_ws(sep, *cols):
"""
Concatenates multiple input string columns together into a single string column,
using the given separator.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
>>> df.select(concat_ws('-', df.s, df.d).alias('s')).collect()
[Row(s='abcd-123')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.concat_ws(sep, _to_seq(sc, cols, _to_java_column)))
@since(1.5)
def decode(col, charset):
"""
Computes the first argument into a string from a binary using the provided character set
(one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
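Examples
--------
Illustrative sketch (assumes an active ``spark`` session); a binary column is
produced with ``encode`` first and then decoded back to a string:
>>> df = spark.createDataFrame([('abcd',)], ['s'])
>>> df.select(decode(encode('s', 'UTF-8'), 'UTF-8').alias('s')).collect()
[Row(s='abcd')]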
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.decode(_to_java_column(col), charset))
@since(1.5)
def encode(col, charset):
"""
Computes the first argument into a binary from a string using the provided character set
(one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
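Examples
--------
Illustrative example (assumes an active ``spark`` session):
>>> df = spark.createDataFrame([('abcd',)], ['s'])
>>> df.select(encode('s', 'UTF-8').alias('b')).collect()
[Row(b=bytearray(b'abcd'))]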
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.encode(_to_java_column(col), charset))
def format_number(col, d):
"""
Formats the number X to a format like '#,###,###.##', rounded to d decimal places
with HALF_EVEN round mode, and returns the result as a string.
.. versionadded:: 1.5.0
Parameters
----------
col : :class:`Column` or str
the column name of the numeric value to be formatted
d : int
the number of decimal places to round to
Examples
--------
>>> spark.createDataFrame([(5,)], ['a']).select(format_number('a', 4).alias('v')).collect()
[Row(v='5.0000')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.format_number(_to_java_column(col), d))
def format_string(format, *cols):
"""
Formats the arguments in printf-style and returns the result as a string column.
.. versionadded:: 1.5.0
Parameters
----------
format : str
string that can contain embedded format tags and used as result column's value
cols : :class:`Column` or str
column names or :class:`Column`\\s to be used in formatting
Examples
--------
>>> df = spark.createDataFrame([(5, "hello")], ['a', 'b'])
>>> df.select(format_string('%d %s', df.a, df.b).alias('v')).collect()
[Row(v='5 hello')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.format_string(format, _to_seq(sc, cols, _to_java_column)))
def instr(str, substr):
"""
Locate the position of the first occurrence of substr in the given string column.
Returns null if either of the arguments is null.
.. versionadded:: 1.5.0
Notes
-----
The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
Examples
--------
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(instr(df.s, 'b').alias('s')).collect()
[Row(s=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.instr(_to_java_column(str), substr))
def overlay(src, replace, pos, len=-1):
"""
Overlay the specified portion of `src` with `replace`,
starting from byte position `pos` of `src` and proceeding for `len` bytes.
.. versionadded:: 3.0.0
Examples
--------
>>> df = spark.createDataFrame([("SPARK_SQL", "CORE")], ("x", "y"))
>>> df.select(overlay("x", "y", 7).alias("overlayed")).show()
+----------+
| overlayed|
+----------+
|SPARK_CORE|
+----------+
"""
if not isinstance(pos, (int, str, Column)):
raise TypeError(
"pos should be an integer or a Column / column name, got {}".format(type(pos)))
if len is not None and not isinstance(len, (int, str, Column)):
raise TypeError(
"len should be an integer or a Column / column name, got {}".format(type(len)))
pos = _create_column_from_literal(pos) if isinstance(pos, int) else _to_java_column(pos)
len = _create_column_from_literal(len) if isinstance(len, int) else _to_java_column(len)
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.overlay(
_to_java_column(src),
_to_java_column(replace),
pos,
len
))
def substring(str, pos, len):
"""
Substring starts at `pos` and is of length `len` when str is String type or
returns the slice of byte array that starts at `pos` in byte and is of length `len`
when str is Binary type.
.. versionadded:: 1.5.0
Notes
-----
The position is not zero based, but 1 based index.
Examples
--------
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(substring(df.s, 1, 2).alias('s')).collect()
[Row(s='ab')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.substring(_to_java_column(str), pos, len))
def substring_index(str, delim, count):
"""
Returns the substring from string str before count occurrences of the delimiter delim.
If count is positive, everything to the left of the final delimiter (counting from the left)
is returned. If count is negative, everything to the right of the final delimiter (counting
from the right) is returned. substring_index performs a case-sensitive match when searching
for delim.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('a.b.c.d',)], ['s'])
>>> df.select(substring_index(df.s, '.', 2).alias('s')).collect()
[Row(s='a.b')]
>>> df.select(substring_index(df.s, '.', -3).alias('s')).collect()
[Row(s='b.c.d')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.substring_index(_to_java_column(str), delim, count))
def levenshtein(left, right):
"""Computes the Levenshtein distance of the two given strings.
.. versionadded:: 1.5.0
Examples
--------
>>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r'])
>>> df0.select(levenshtein('l', 'r').alias('d')).collect()
[Row(d=3)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.levenshtein(_to_java_column(left), _to_java_column(right))
return Column(jc)
def locate(substr, str, pos=1):
"""
Locate the position of the first occurrence of substr in a string column, after position pos.
.. versionadded:: 1.5.0
Parameters
----------
substr : str
a string
str : :class:`Column` or str
a Column of :class:`pyspark.sql.types.StringType`
pos : int, optional
start position (zero based)
Notes
-----
The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
Examples
--------
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(locate('b', df.s, 1).alias('s')).collect()
[Row(s=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.locate(substr, _to_java_column(str), pos))
def lpad(col, len, pad):
"""
Left-pad the string column to width `len` with `pad`.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(lpad(df.s, 6, '#').alias('s')).collect()
[Row(s='##abcd')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lpad(_to_java_column(col), len, pad))
def rpad(col, len, pad):
"""
Right-pad the string column to width `len` with `pad`.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(rpad(df.s, 6, '#').alias('s')).collect()
[Row(s='abcd##')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.rpad(_to_java_column(col), len, pad))
def repeat(col, n):
"""
Repeats a string column n times, and returns it as a new string column.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('ab',)], ['s',])
>>> df.select(repeat(df.s, 3).alias('s')).collect()
[Row(s='ababab')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.repeat(_to_java_column(col), n))
def split(str, pattern, limit=-1):
"""
Splits str around matches of the given pattern.
.. versionadded:: 1.5.0
Parameters
----------
str : :class:`Column` or str
a string expression to split
pattern : str
a string representing a regular expression. The regex string should be
a Java regular expression.
limit : int, optional
an integer which controls the number of times `pattern` is applied.
* ``limit > 0``: The resulting array's length will not be more than `limit`, and the
resulting array's last entry will contain all input beyond the last
matched pattern.
* ``limit <= 0``: `pattern` will be applied as many times as possible, and the resulting
array can be of any size.
.. versionchanged:: 3.0
`split` now takes an optional `limit` field. If not provided, default limit value is -1.
Examples
--------
>>> df = spark.createDataFrame([('oneAtwoBthreeC',)], ['s',])
>>> df.select(split(df.s, '[ABC]', 2).alias('s')).collect()
[Row(s=['one', 'twoBthreeC'])]
>>> df.select(split(df.s, '[ABC]', -1).alias('s')).collect()
[Row(s=['one', 'two', 'three', ''])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.split(_to_java_column(str), pattern, limit))
def regexp_extract(str, pattern, idx):
r"""Extract a specific group matched by a Java regex, from the specified string column.
If the regex did not match, or the specified group did not match, an empty string is returned.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_extract('str', r'(\d+)-(\d+)', 1).alias('d')).collect()
[Row(d='100')]
>>> df = spark.createDataFrame([('foo',)], ['str'])
>>> df.select(regexp_extract('str', r'(\d+)', 1).alias('d')).collect()
[Row(d='')]
>>> df = spark.createDataFrame([('aaaac',)], ['str'])
>>> df.select(regexp_extract('str', '(a+)(b)?(c)', 2).alias('d')).collect()
[Row(d='')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_extract(_to_java_column(str), pattern, idx)
return Column(jc)
def regexp_replace(str, pattern, replacement):
r"""Replace all substrings of the specified string value that match regexp with rep.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_replace('str', r'(\d+)', '--').alias('d')).collect()
[Row(d='-----')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_replace(_to_java_column(str), pattern, replacement)
return Column(jc)
def initcap(col):
"""Translate the first letter of each word to upper case in the sentence.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([('ab cd',)], ['a']).select(initcap("a").alias('v')).collect()
[Row(v='Ab Cd')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.initcap(_to_java_column(col)))
def soundex(col):
"""
Returns the SoundEx encoding for a string
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([("Peters",),("Uhrbach",)], ['name'])
>>> df.select(soundex(df.name).alias("soundex")).collect()
[Row(soundex='P362'), Row(soundex='U612')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.soundex(_to_java_column(col)))
def bin(col):
"""Returns the string representation of the binary value of the given column.
.. versionadded:: 1.5.0
Examples
--------
>>> df.select(bin(df.age).alias('c')).collect()
[Row(c='10'), Row(c='101')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.bin(_to_java_column(col))
return Column(jc)
def hex(col):
"""Computes hex value of the given column, which could be :class:`pyspark.sql.types.StringType`,
:class:`pyspark.sql.types.BinaryType`, :class:`pyspark.sql.types.IntegerType` or
:class:`pyspark.sql.types.LongType`.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([('ABC', 3)], ['a', 'b']).select(hex('a'), hex('b')).collect()
[Row(hex(a)='414243', hex(b)='3')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.hex(_to_java_column(col))
return Column(jc)
def unhex(col):
"""Inverse of hex. Interprets each pair of characters as a hexadecimal number
and converts to the byte representation of number.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([('414243',)], ['a']).select(unhex('a')).collect()
[Row(unhex(a)=bytearray(b'ABC'))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.unhex(_to_java_column(col)))
def length(col):
"""Computes the character length of string data or number of bytes of binary data.
The length of character data includes the trailing spaces. The length of binary data
includes binary zeros.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([('ABC ',)], ['a']).select(length('a').alias('length')).collect()
[Row(length=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.length(_to_java_column(col)))
def translate(srcCol, matching, replace):
A function that translates any character in the `srcCol` by a character in `matching`.
The characters in `replace` correspond to the characters in `matching`.
The translation occurs whenever a character in the string matches a character
in `matching`.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
... .alias('r')).collect()
[Row(r='1a2s3ae')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.translate(_to_java_column(srcCol), matching, replace))
# ---------------------- Collection functions ------------------------------
def create_map(*cols):
"""Creates a new map column.
.. versionadded:: 2.0.0
Parameters
----------
cols : :class:`Column` or str
column names or :class:`Column`\\s that are
grouped as key-value pairs, e.g. (key1, value1, key2, value2, ...).
Examples
--------
>>> df.select(create_map('name', 'age').alias("map")).collect()
[Row(map={'Alice': 2}), Row(map={'Bob': 5})]
>>> df.select(create_map([df.name, df.age]).alias("map")).collect()
[Row(map={'Alice': 2}), Row(map={'Bob': 5})]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.map(_to_seq(sc, cols, _to_java_column))
return Column(jc)
def map_from_arrays(col1, col2):
"""Creates a new map from two arrays.
.. versionadded:: 2.4.0
Parameters
----------
col1 : :class:`Column` or str
name of column containing a set of keys. All elements should not be null
col2 : :class:`Column` or str
name of column containing a set of values
Examples
--------
>>> df = spark.createDataFrame([([2, 5], ['a', 'b'])], ['k', 'v'])
>>> df.select(map_from_arrays(df.k, df.v).alias("map")).show()
+----------------+
| map|
+----------------+
|{2 -> a, 5 -> b}|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_from_arrays(_to_java_column(col1), _to_java_column(col2)))
def array(*cols):
"""Creates a new array column.
.. versionadded:: 1.4.0
Parameters
----------
cols : :class:`Column` or str
column names or :class:`Column`\\s that have
the same data type.
Examples
--------
>>> df.select(array('age', 'age').alias("arr")).collect()
[Row(arr=[2, 2]), Row(arr=[5, 5])]
>>> df.select(array([df.age, df.age]).alias("arr")).collect()
[Row(arr=[2, 2]), Row(arr=[5, 5])]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.array(_to_seq(sc, cols, _to_java_column))
return Column(jc)
def array_contains(col, value):
"""
Collection function: returns null if the array is null, true if the array contains the
given value, and false otherwise.
.. versionadded:: 1.5.0
Parameters
----------
col : :class:`Column` or str
name of column containing array
value :
value or column to check for in array
Examples
--------
>>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
>>> df.select(array_contains(df.data, "a")).collect()
[Row(array_contains(data, a)=True), Row(array_contains(data, a)=False)]
>>> df.select(array_contains(df.data, lit("a"))).collect()
[Row(array_contains(data, a)=True), Row(array_contains(data, a)=False)]
"""
sc = SparkContext._active_spark_context
value = value._jc if isinstance(value, Column) else value
return Column(sc._jvm.functions.array_contains(_to_java_column(col), value))
def arrays_overlap(a1, a2):
"""
Collection function: returns true if the arrays contain any common non-null element; if not,
returns null if both the arrays are non-empty and any of them contains a null element; returns
false otherwise.
.. versionadded:: 2.4.0
Examples
--------
>>> df = spark.createDataFrame([(["a", "b"], ["b", "c"]), (["a"], ["b", "c"])], ['x', 'y'])
>>> df.select(arrays_overlap(df.x, df.y).alias("overlap")).collect()
[Row(overlap=True), Row(overlap=False)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.arrays_overlap(_to_java_column(a1), _to_java_column(a2)))
def slice(x, start, length):
"""
Collection function: returns an array containing all the elements in `x` from index `start`
(array indices start at 1, or from the end if `start` is negative) with the specified `length`.
.. versionadded:: 2.4.0
Parameters
----------
x : :class:`Column` or str
the array to be sliced
start : :class:`Column` or int
the starting index
length : :class:`Column` or int
the length of the slice
Examples
--------
>>> df = spark.createDataFrame([([1, 2, 3],), ([4, 5],)], ['x'])
>>> df.select(slice(df.x, 2, 2).alias("sliced")).collect()
[Row(sliced=[2, 3]), Row(sliced=[5])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.slice(
_to_java_column(x),
start._jc if isinstance(start, Column) else start,
length._jc if isinstance(length, Column) else length
))
def array_join(col, delimiter, null_replacement=None):
"""
Concatenates the elements of `col` using the `delimiter`. Null values are replaced with
`null_replacement` if set, otherwise they are ignored.
.. versionadded:: 2.4.0
Examples
--------
>>> df = spark.createDataFrame([(["a", "b", "c"],), (["a", None],)], ['data'])
>>> df.select(array_join(df.data, ",").alias("joined")).collect()
[Row(joined='a,b,c'), Row(joined='a')]
>>> df.select(array_join(df.data, ",", "NULL").alias("joined")).collect()
[Row(joined='a,b,c'), Row(joined='a,NULL')]
"""
sc = SparkContext._active_spark_context
if null_replacement is None:
return Column(sc._jvm.functions.array_join(_to_java_column(col), delimiter))
else:
return Column(sc._jvm.functions.array_join(
_to_java_column(col), delimiter, null_replacement))
def concat(*cols):
"""
Concatenates multiple input columns together into a single column.
The function works with strings, binary and compatible array columns.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
>>> df.select(concat(df.s, df.d).alias('s')).collect()
[Row(s='abcd123')]
>>> df = spark.createDataFrame([([1, 2], [3, 4], [5]), ([1, 2], None, [3])], ['a', 'b', 'c'])
>>> df.select(concat(df.a, df.b, df.c).alias("arr")).collect()
[Row(arr=[1, 2, 3, 4, 5]), Row(arr=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.concat(_to_seq(sc, cols, _to_java_column)))
def array_position(col, value):
"""
Collection function: Locates the position of the first occurrence of the given value
in the given array. Returns null if either of the arguments is null.
.. versionadded:: 2.4.0
Notes
-----
The position is not zero based, but 1 based index. Returns 0 if the given
value could not be found in the array.
Examples
--------
>>> df = spark.createDataFrame([(["c", "b", "a"],), ([],)], ['data'])
>>> df.select(array_position(df.data, "a")).collect()
[Row(array_position(data, a)=3), Row(array_position(data, a)=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_position(_to_java_column(col), value))
def element_at(col, extraction):
"""
Collection function: Returns the element of the array at the given index in `extraction`
if `col` is an array. Returns the value for the given key in `extraction` if `col` is a map.
.. versionadded:: 2.4.0
Parameters
----------
col : :class:`Column` or str
name of column containing array or map
extraction :
index to check for in array or key to check for in map
Notes
-----
The position is not zero based, but 1 based index.
Examples
--------
>>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
>>> df.select(element_at(df.data, 1)).collect()
[Row(element_at(data, 1)='a'), Row(element_at(data, 1)=None)]
>>> df = spark.createDataFrame([({"a": 1.0, "b": 2.0},), ({},)], ['data'])
>>> df.select(element_at(df.data, lit("a"))).collect()
[Row(element_at(data, a)=1.0), Row(element_at(data, a)=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.element_at(
_to_java_column(col), lit(extraction)._jc))
def array_remove(col, element):
"""
Collection function: Remove all elements that equal `element` from the given array.
.. versionadded:: 2.4.0
Parameters
----------
col : :class:`Column` or str
name of column containing array
element :
element to be removed from the array
Examples
--------
>>> df = spark.createDataFrame([([1, 2, 3, 1, 1],), ([],)], ['data'])
>>> df.select(array_remove(df.data, 1)).collect()
[Row(array_remove(data, 1)=[2, 3]), Row(array_remove(data, 1)=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_remove(_to_java_column(col), element))
def array_distinct(col):
"""
Collection function: removes duplicate values from the array.
.. versionadded:: 2.4.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
Examples
--------
>>> df = spark.createDataFrame([([1, 2, 3, 2],), ([4, 5, 5, 4],)], ['data'])
>>> df.select(array_distinct(df.data)).collect()
[Row(array_distinct(data)=[1, 2, 3]), Row(array_distinct(data)=[4, 5])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_distinct(_to_java_column(col)))
def array_intersect(col1, col2):
"""
Collection function: returns an array of the elements in the intersection of col1 and col2,
without duplicates.
.. versionadded:: 2.4.0
Parameters
----------
col1 : :class:`Column` or str
name of column containing array
col2 : :class:`Column` or str
name of column containing array
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_intersect(df.c1, df.c2)).collect()
[Row(array_intersect(c1, c2)=['a', 'c'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_intersect(_to_java_column(col1), _to_java_column(col2)))
def array_union(col1, col2):
"""
Collection function: returns an array of the elements in the union of col1 and col2,
without duplicates.
.. versionadded:: 2.4.0
Parameters
----------
col1 : :class:`Column` or str
name of column containing array
col2 : :class:`Column` or str
name of column containing array
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_union(df.c1, df.c2)).collect()
[Row(array_union(c1, c2)=['b', 'a', 'c', 'd', 'f'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_union(_to_java_column(col1), _to_java_column(col2)))
def array_except(col1, col2):
"""
Collection function: returns an array of the elements in col1 but not in col2,
without duplicates.
.. versionadded:: 2.4.0
Parameters
----------
col1 : :class:`Column` or str
name of column containing array
col2 : :class:`Column` or str
name of column containing array
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_except(df.c1, df.c2)).collect()
[Row(array_except(c1, c2)=['b'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_except(_to_java_column(col1), _to_java_column(col2)))
def explode(col):
"""
Returns a new row for each element in the given array or map.
Uses the default column name `col` for elements in the array and
`key` and `value` for elements in the map unless specified otherwise.
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.sql import Row
>>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
>>> eDF.select(explode(eDF.intlist).alias("anInt")).collect()
[Row(anInt=1), Row(anInt=2), Row(anInt=3)]
>>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
+---+-----+
|key|value|
+---+-----+
| a| b|
+---+-----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.explode(_to_java_column(col))
return Column(jc)
def posexplode(col):
"""
Returns a new row for each element with position in the given array or map.
Uses the default column name `pos` for position, and `col` for elements in the
array and `key` and `value` for elements in the map unless specified otherwise.
.. versionadded:: 2.1.0
Examples
--------
>>> from pyspark.sql import Row
>>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
>>> eDF.select(posexplode(eDF.intlist)).collect()
[Row(pos=0, col=1), Row(pos=1, col=2), Row(pos=2, col=3)]
>>> eDF.select(posexplode(eDF.mapfield)).show()
+---+---+-----+
|pos|key|value|
+---+---+-----+
| 0| a| b|
+---+---+-----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.posexplode(_to_java_column(col))
return Column(jc)
def explode_outer(col):
"""
Returns a new row for each element in the given array or map.
Unlike explode, if the array/map is null or empty then null is produced.
Uses the default column name `col` for elements in the array and
`key` and `value` for elements in the map unless specified otherwise.
.. versionadded:: 2.3.0
Examples
--------
>>> df = spark.createDataFrame(
... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
... ("id", "an_array", "a_map")
... )
>>> df.select("id", "an_array", explode_outer("a_map")).show()
+---+----------+----+-----+
| id| an_array| key|value|
+---+----------+----+-----+
| 1|[foo, bar]| x| 1.0|
| 2| []|null| null|
| 3| null|null| null|
+---+----------+----+-----+
>>> df.select("id", "a_map", explode_outer("an_array")).show()
+---+----------+----+
| id| a_map| col|
+---+----------+----+
| 1|{x -> 1.0}| foo|
| 1|{x -> 1.0}| bar|
| 2| {}|null|
| 3| null|null|
+---+----------+----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.explode_outer(_to_java_column(col))
return Column(jc)
def posexplode_outer(col):
"""
Returns a new row for each element with position in the given array or map.
Unlike posexplode, if the array/map is null or empty then the row (null, null) is produced.
Uses the default column name `pos` for position, and `col` for elements in the
array and `key` and `value` for elements in the map unless specified otherwise.
.. versionadded:: 2.3.0
Examples
--------
>>> df = spark.createDataFrame(
... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
... ("id", "an_array", "a_map")
... )
>>> df.select("id", "an_array", posexplode_outer("a_map")).show()
+---+----------+----+----+-----+
| id| an_array| pos| key|value|
+---+----------+----+----+-----+
| 1|[foo, bar]| 0| x| 1.0|
| 2| []|null|null| null|
| 3| null|null|null| null|
+---+----------+----+----+-----+
>>> df.select("id", "a_map", posexplode_outer("an_array")).show()
+---+----------+----+----+
| id| a_map| pos| col|
+---+----------+----+----+
| 1|{x -> 1.0}| 0| foo|
| 1|{x -> 1.0}| 1| bar|
| 2| {}|null|null|
| 3| null|null|null|
+---+----------+----+----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.posexplode_outer(_to_java_column(col))
return Column(jc)
def get_json_object(col, path):
"""
Extracts json object from a json string based on json path specified, and returns json string
of the extracted json object. It will return null if the input json string is invalid.
.. versionadded:: 1.6.0
Parameters
----------
col : :class:`Column` or str
string column in json format
path : str
path to the json object to extract
Examples
--------
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, get_json_object(df.jstring, '$.f1').alias("c0"), \\
... get_json_object(df.jstring, '$.f2').alias("c1") ).collect()
[Row(key='1', c0='value1', c1='value2'), Row(key='2', c0='value12', c1=None)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.get_json_object(_to_java_column(col), path)
return Column(jc)
def json_tuple(col, *fields):
"""Creates a new row for a json column according to the given field names.
.. versionadded:: 1.6.0
Parameters
----------
col : :class:`Column` or str
string column in json format
fields : str
fields to extract
Examples
--------
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect()
[Row(key='1', c0='value1', c1='value2'), Row(key='2', c0='value12', c1=None)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.json_tuple(_to_java_column(col), _to_seq(sc, fields))
return Column(jc)
def from_json(col, schema, options={}):
"""
Parses a column containing a JSON string into a :class:`MapType` with :class:`StringType`
as keys type, :class:`StructType` or :class:`ArrayType` with
the specified schema. Returns `null`, in the case of an unparseable string.
.. versionadded:: 2.1.0
Parameters
----------
col : :class:`Column` or str
string column in json format
schema : :class:`DataType` or str
a StructType or ArrayType of StructType to use when parsing the json column.
.. versionchanged:: 2.3
the DDL-formatted string is also supported for ``schema``.
options : dict, optional
options to control parsing. accepts the same options as the json datasource
Examples
--------
>>> from pyspark.sql.types import *
>>> data = [(1, '''{"a": 1}''')]
>>> schema = StructType([StructField("a", IntegerType())])
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=Row(a=1))]
>>> df.select(from_json(df.value, "a INT").alias("json")).collect()
[Row(json=Row(a=1))]
>>> df.select(from_json(df.value, "MAP<STRING,INT>").alias("json")).collect()
[Row(json={'a': 1})]
>>> data = [(1, '''[{"a": 1}]''')]
>>> schema = ArrayType(StructType([StructField("a", IntegerType())]))
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=[Row(a=1)])]
>>> schema = schema_of_json(lit('''{"a": 0}'''))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=Row(a=None))]
>>> data = [(1, '''[1, 2, 3]''')]
>>> schema = ArrayType(IntegerType())
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=[1, 2, 3])]
"""
sc = SparkContext._active_spark_context
if isinstance(schema, DataType):
schema = schema.json()
elif isinstance(schema, Column):
schema = _to_java_column(schema)
jc = sc._jvm.functions.from_json(_to_java_column(col), schema, _options_to_str(options))
return Column(jc)
def to_json(col, options={}):
"""
Converts a column containing a :class:`StructType`, :class:`ArrayType` or a :class:`MapType`
into a JSON string. Throws an exception, in the case of an unsupported type.
.. versionadded:: 2.1.0
Parameters
----------
col : :class:`Column` or str
name of column containing a struct, an array or a map.
options : dict, optional
options to control converting. accepts the same options as the JSON datasource.
Additionally the function supports the `pretty` option which enables
pretty JSON generation.
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.sql.types import *
>>> data = [(1, Row(age=2, name='Alice'))]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json='{"age":2,"name":"Alice"}')]
>>> data = [(1, [Row(age=2, name='Alice'), Row(age=3, name='Bob')])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json='[{"age":2,"name":"Alice"},{"age":3,"name":"Bob"}]')]
>>> data = [(1, {"name": "Alice"})]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json='{"name":"Alice"}')]
>>> data = [(1, [{"name": "Alice"}, {"name": "Bob"}])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json='[{"name":"Alice"},{"name":"Bob"}]')]
>>> data = [(1, ["Alice", "Bob"])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json='["Alice","Bob"]')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.to_json(_to_java_column(col), _options_to_str(options))
return Column(jc)
def schema_of_json(json, options={}):
"""
Parses a JSON string and infers its schema in DDL format.
.. versionadded:: 2.4.0
Parameters
----------
json : :class:`Column` or str
a JSON string or a foldable string column containing a JSON string.
options : dict, optional
options to control parsing. accepts the same options as the JSON datasource
.. versionchanged:: 3.0
It accepts `options` parameter to control schema inferring.
Examples
--------
>>> df = spark.range(1)
>>> df.select(schema_of_json(lit('{"a": 0}')).alias("json")).collect()
[Row(json='STRUCT<`a`: BIGINT>')]
>>> schema = schema_of_json('{a: 1}', {'allowUnquotedFieldNames':'true'})
>>> df.select(schema.alias("json")).collect()
[Row(json='STRUCT<`a`: BIGINT>')]
"""
if isinstance(json, str):
col = _create_column_from_literal(json)
elif isinstance(json, Column):
col = _to_java_column(json)
else:
raise TypeError("schema argument should be a column or string")
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.schema_of_json(col, _options_to_str(options))
return Column(jc)
def schema_of_csv(csv, options={}):
"""
Parses a CSV string and infers its schema in DDL format.
.. versionadded:: 3.0.0
Parameters
----------
csv : :class:`Column` or str
a CSV string or a foldable string column containing a CSV string.
options : dict, optional
options to control parsing. accepts the same options as the CSV datasource
Examples
--------
>>> df = spark.range(1)
>>> df.select(schema_of_csv(lit('1|a'), {'sep':'|'}).alias("csv")).collect()
[Row(csv='STRUCT<`_c0`: INT, `_c1`: STRING>')]
>>> df.select(schema_of_csv('1|a', {'sep':'|'}).alias("csv")).collect()
[Row(csv='STRUCT<`_c0`: INT, `_c1`: STRING>')]
"""
if isinstance(csv, str):
col = _create_column_from_literal(csv)
elif isinstance(csv, Column):
col = _to_java_column(csv)
else:
raise TypeError("schema argument should be a column or string")
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.schema_of_csv(col, _options_to_str(options))
return Column(jc)
def to_csv(col, options={}):
"""
Converts a column containing a :class:`StructType` into a CSV string.
Throws an exception, in the case of an unsupported type.
.. versionadded:: 3.0.0
Parameters
----------
col : :class:`Column` or str
name of column containing a struct.
options: dict, optional
options to control converting. accepts the same options as the CSV datasource.
Examples
--------
>>> from pyspark.sql import Row
>>> data = [(1, Row(age=2, name='Alice'))]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_csv(df.value).alias("csv")).collect()
[Row(csv='2,Alice')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.to_csv(_to_java_column(col), _options_to_str(options))
return Column(jc)
def size(col):
"""
Collection function: returns the length of the array or map stored in the column.
.. versionadded:: 1.5.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
Examples
--------
>>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data'])
>>> df.select(size(df.data)).collect()
[Row(size(data)=3), Row(size(data)=1), Row(size(data)=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.size(_to_java_column(col)))
def array_min(col):
"""
Collection function: returns the minimum value of the array.
.. versionadded:: 2.4.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
Examples
--------
>>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
>>> df.select(array_min(df.data).alias('min')).collect()
[Row(min=1), Row(min=-1)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_min(_to_java_column(col)))
def array_max(col):
"""
Collection function: returns the maximum value of the array.
.. versionadded:: 2.4.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
Examples
--------
>>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
>>> df.select(array_max(df.data).alias('max')).collect()
[Row(max=3), Row(max=10)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_max(_to_java_column(col)))
def sort_array(col, asc=True):
"""
Collection function: sorts the input array in ascending or descending order according
to the natural ordering of the array elements. Null elements will be placed at the beginning
of the returned array in ascending order or at the end of the returned array in descending
order.
.. versionadded:: 1.5.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
asc : bool, optional
Examples
--------
>>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
>>> df.select(sort_array(df.data).alias('r')).collect()
[Row(r=[None, 1, 2, 3]), Row(r=[1]), Row(r=[])]
>>> df.select(sort_array(df.data, asc=False).alias('r')).collect()
[Row(r=[3, 2, 1, None]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.sort_array(_to_java_column(col), asc))
def array_sort(col):
"""
Collection function: sorts the input array in ascending order. The elements of the input array
must be orderable. Null elements will be placed at the end of the returned array.
.. versionadded:: 2.4.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
Examples
--------
>>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
>>> df.select(array_sort(df.data).alias('r')).collect()
[Row(r=[1, 2, 3, None]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_sort(_to_java_column(col)))
def shuffle(col):
"""
Collection function: Generates a random permutation of the given array.
.. versionadded:: 2.4.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
Notes
-----
The function is non-deterministic.
Examples
--------
>>> df = spark.createDataFrame([([1, 20, 3, 5],), ([1, 20, None, 3],)], ['data'])
>>> df.select(shuffle(df.data).alias('s')).collect() # doctest: +SKIP
[Row(s=[3, 1, 5, 20]), Row(s=[20, None, 3, 1])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.shuffle(_to_java_column(col)))
def reverse(col):
"""
Collection function: returns a reversed string or an array with reverse order of elements.
.. versionadded:: 1.5.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
Examples
--------
>>> df = spark.createDataFrame([('Spark SQL',)], ['data'])
>>> df.select(reverse(df.data).alias('s')).collect()
[Row(s='LQS krapS')]
>>> df = spark.createDataFrame([([2, 1, 3],) ,([1],) ,([],)], ['data'])
>>> df.select(reverse(df.data).alias('r')).collect()
[Row(r=[3, 1, 2]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.reverse(_to_java_column(col)))
def flatten(col):
"""
Collection function: creates a single array from an array of arrays.
If a structure of nested arrays is deeper than two levels,
only one level of nesting is removed.
.. versionadded:: 2.4.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
Examples
--------
>>> df = spark.createDataFrame([([[1, 2, 3], [4, 5], [6]],), ([None, [4, 5]],)], ['data'])
>>> df.select(flatten(df.data).alias('r')).collect()
[Row(r=[1, 2, 3, 4, 5, 6]), Row(r=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.flatten(_to_java_column(col)))
def map_keys(col):
"""
Collection function: Returns an unordered array containing the keys of the map.
.. versionadded:: 2.3.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
Examples
--------
>>> from pyspark.sql.functions import map_keys
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_keys("data").alias("keys")).show()
+------+
| keys|
+------+
|[1, 2]|
+------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_keys(_to_java_column(col)))
def map_values(col):
"""
Collection function: Returns an unordered array containing the values of the map.
.. versionadded:: 2.3.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
Examples
--------
>>> from pyspark.sql.functions import map_values
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_values("data").alias("values")).show()
+------+
|values|
+------+
|[a, b]|
+------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_values(_to_java_column(col)))
def map_entries(col):
"""
Collection function: Returns an unordered array of all entries in the given map.
.. versionadded:: 3.0.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
Examples
--------
>>> from pyspark.sql.functions import map_entries
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_entries("data").alias("entries")).show()
+----------------+
| entries|
+----------------+
|[{1, a}, {2, b}]|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_entries(_to_java_column(col)))
def map_from_entries(col):
"""
Collection function: Returns a map created from the given array of entries.
.. versionadded:: 2.4.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
Examples
--------
>>> from pyspark.sql.functions import map_from_entries
>>> df = spark.sql("SELECT array(struct(1, 'a'), struct(2, 'b')) as data")
>>> df.select(map_from_entries("data").alias("map")).show()
+----------------+
| map|
+----------------+
|{1 -> a, 2 -> b}|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_from_entries(_to_java_column(col)))
def array_repeat(col, count):
"""
Collection function: creates an array containing a column repeated count times.
.. versionadded:: 2.4.0
Examples
--------
>>> df = spark.createDataFrame([('ab',)], ['data'])
>>> df.select(array_repeat(df.data, 3).alias('r')).collect()
[Row(r=['ab', 'ab', 'ab'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_repeat(
_to_java_column(col),
_to_java_column(count) if isinstance(count, Column) else count
))
def arrays_zip(*cols):
"""
Collection function: Returns a merged array of structs in which the N-th struct contains all
N-th values of input arrays.
.. versionadded:: 2.4.0
Parameters
----------
cols : :class:`Column` or str
columns of arrays to be merged.
Examples
--------
>>> from pyspark.sql.functions import arrays_zip
>>> df = spark.createDataFrame([(([1, 2, 3], [2, 3, 4]))], ['vals1', 'vals2'])
>>> df.select(arrays_zip(df.vals1, df.vals2).alias('zipped')).collect()
[Row(zipped=[Row(vals1=1, vals2=2), Row(vals1=2, vals2=3), Row(vals1=3, vals2=4)])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.arrays_zip(_to_seq(sc, cols, _to_java_column)))
def map_concat(*cols):
"""Returns the union of all the given maps.
.. versionadded:: 2.4.0
Parameters
----------
cols : :class:`Column` or str
column names or :class:`Column`\\s
Examples
--------
>>> from pyspark.sql.functions import map_concat
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as map1, map(3, 'c') as map2")
>>> df.select(map_concat("map1", "map2").alias("map3")).show(truncate=False)
+------------------------+
|map3 |
+------------------------+
|{1 -> a, 2 -> b, 3 -> c}|
+------------------------+
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.map_concat(_to_seq(sc, cols, _to_java_column))
return Column(jc)
def sequence(start, stop, step=None):
"""
Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
If `step` is not set, the sequence increments by 1 when `start` is less than or equal to
`stop`, and by -1 otherwise.
.. versionadded:: 2.4.0
Examples
--------
>>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
>>> df1.select(sequence('C1', 'C2').alias('r')).collect()
[Row(r=[-2, -1, 0, 1, 2])]
>>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
>>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
[Row(r=[4, 2, 0, -2, -4])]
"""
sc = SparkContext._active_spark_context
if step is None:
return Column(sc._jvm.functions.sequence(_to_java_column(start), _to_java_column(stop)))
else:
return Column(sc._jvm.functions.sequence(
_to_java_column(start), _to_java_column(stop), _to_java_column(step)))
def from_csv(col, schema, options={}):
"""
Parses a column containing a CSV string to a row with the specified schema.
Returns `null`, in the case of an unparseable string.
.. versionadded:: 3.0.0
Parameters
----------
col : :class:`Column` or str
string column in CSV format
schema : :class:`Column` or str
a string with schema in DDL format to use when parsing the CSV column.
options : dict, optional
options to control parsing. accepts the same options as the CSV datasource
Examples
--------
>>> data = [("1,2,3",)]
>>> df = spark.createDataFrame(data, ("value",))
>>> df.select(from_csv(df.value, "a INT, b INT, c INT").alias("csv")).collect()
[Row(csv=Row(a=1, b=2, c=3))]
>>> value = data[0][0]
>>> df.select(from_csv(df.value, schema_of_csv(value)).alias("csv")).collect()
[Row(csv=Row(_c0=1, _c1=2, _c2=3))]
>>> data = [(" abc",)]
>>> df = spark.createDataFrame(data, ("value",))
>>> options = {'ignoreLeadingWhiteSpace': True}
>>> df.select(from_csv(df.value, "s string", options).alias("csv")).collect()
[Row(csv=Row(s='abc'))]
"""
sc = SparkContext._active_spark_context
if isinstance(schema, str):
schema = _create_column_from_literal(schema)
elif isinstance(schema, Column):
schema = _to_java_column(schema)
else:
raise TypeError("schema argument should be a column or string")
jc = sc._jvm.functions.from_csv(_to_java_column(col), schema, _options_to_str(options))
return Column(jc)
def _unresolved_named_lambda_variable(*name_parts):
"""
Create `o.a.s.sql.expressions.UnresolvedNamedLambdaVariable`,
convert it to o.a.s.sql.Column and wrap it in a Python `Column`
Parameters
----------
name_parts : str
"""
sc = SparkContext._active_spark_context
name_parts_seq = _to_seq(sc, name_parts)
expressions = sc._jvm.org.apache.spark.sql.catalyst.expressions
return Column(
sc._jvm.Column(
expressions.UnresolvedNamedLambdaVariable(name_parts_seq)
)
)
def _get_lambda_parameters(f):
import inspect
signature = inspect.signature(f)
parameters = signature.parameters.values()
# We should exclude functions that use
# variable args (*args) or keyword args (**kwargs)
# as well as keyword-only args
supported_parameter_types = {
inspect.Parameter.POSITIONAL_OR_KEYWORD,
inspect.Parameter.POSITIONAL_ONLY,
}
# Validate that
# function arity is between 1 and 3
if not (1 <= len(parameters) <= 3):
raise ValueError(
"f should take between 1 and 3 arguments, but provided function takes {}".format(
len(parameters)
)
)
# and all arguments can be used as positional
if not all(p.kind in supported_parameter_types for p in parameters):
raise ValueError(
"f should use only POSITIONAL or POSITIONAL OR KEYWORD arguments"
)
return parameters
def _create_lambda(f):
"""
Create `o.a.s.sql.expressions.LambdaFunction` corresponding
to transformation described by f
:param f: A Python of one of the following forms:
- (Column) -> Column: ...
- (Column, Column) -> Column: ...
- (Column, Column, Column) -> Column: ...
"""
parameters = _get_lambda_parameters(f)
sc = SparkContext._active_spark_context
expressions = sc._jvm.org.apache.spark.sql.catalyst.expressions
argnames = ["x", "y", "z"]
args = [
_unresolved_named_lambda_variable(arg) for arg in argnames[: len(parameters)]
]
result = f(*args)
if not isinstance(result, Column):
raise ValueError("f should return Column, got {}".format(type(result)))
jexpr = result._jc.expr()
jargs = _to_seq(sc, [arg._jc.expr() for arg in args])
return expressions.LambdaFunction(jexpr, jargs, False)
def _invoke_higher_order_function(name, cols, funs):
"""
Invokes expression identified by name,
(relative to ``org.apache.spark.sql.catalyst.expressions``)
and wraps the result with Column (first Scala one, then Python).
:param name: Name of the expression
:param cols: a list of columns
:param funs: a list of ``(*Column) -> Column`` functions.
:return: a Column
"""
sc = SparkContext._active_spark_context
expressions = sc._jvm.org.apache.spark.sql.catalyst.expressions
expr = getattr(expressions, name)
jcols = [_to_java_column(col).expr() for col in cols]
jfuns = [_create_lambda(f) for f in funs]
return Column(sc._jvm.Column(expr(*jcols + jfuns)))
def transform(col, f):
"""
Returns an array of elements after applying a transformation to each element in the input array.
.. versionadded:: 3.1.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
f : function
a function that is applied to each element of the input array.
Can take one of the following forms:
- Unary ``(x: Column) -> Column: ...``
- Binary ``(x: Column, i: Column) -> Column...``, where the second argument is
a 0-based index of the element.
and can use methods of :class:`pyspark.sql.Column`, functions defined in
:py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
Python ``UserDefinedFunctions`` are not supported
(`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).
Returns
-------
:class:`pyspark.sql.Column`
Examples
--------
>>> df = spark.createDataFrame([(1, [1, 2, 3, 4])], ("key", "values"))
>>> df.select(transform("values", lambda x: x * 2).alias("doubled")).show()
+------------+
| doubled|
+------------+
|[2, 4, 6, 8]|
+------------+
>>> def alternate(x, i):
... return when(i % 2 == 0, x).otherwise(-x)
>>> df.select(transform("values", alternate).alias("alternated")).show()
+--------------+
| alternated|
+--------------+
|[1, -2, 3, -4]|
+--------------+
"""
return _invoke_higher_order_function("ArrayTransform", [col], [f])
def exists(col, f):
"""
Returns whether a predicate holds for one or more elements in the array.
.. versionadded:: 3.1.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
f : function
``(x: Column) -> Column: ...`` returning the Boolean expression.
Can use methods of :class:`pyspark.sql.Column`, functions defined in
:py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
Python ``UserDefinedFunctions`` are not supported
(`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).
Returns
-------
:class:`pyspark.sql.Column`
Examples
--------
>>> df = spark.createDataFrame([(1, [1, 2, 3, 4]), (2, [3, -1, 0])],("key", "values"))
>>> df.select(exists("values", lambda x: x < 0).alias("any_negative")).show()
+------------+
|any_negative|
+------------+
| false|
| true|
+------------+
"""
return _invoke_higher_order_function("ArrayExists", [col], [f])
def forall(col, f):
"""
Returns whether a predicate holds for every element in the array.
.. versionadded:: 3.1.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
f : function
``(x: Column) -> Column: ...`` returning the Boolean expression.
Can use methods of :class:`pyspark.sql.Column`, functions defined in
:py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
Python ``UserDefinedFunctions`` are not supported
(`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).
Returns
-------
:class:`pyspark.sql.Column`
Examples
--------
>>> df = spark.createDataFrame(
... [(1, ["bar"]), (2, ["foo", "bar"]), (3, ["foobar", "foo"])],
... ("key", "values")
... )
>>> df.select(forall("values", lambda x: x.rlike("foo")).alias("all_foo")).show()
+-------+
|all_foo|
+-------+
| false|
| false|
| true|
+-------+
"""
return _invoke_higher_order_function("ArrayForAll", [col], [f])
def filter(col, f):
"""
Returns an array of elements for which a predicate holds in a given array.
.. versionadded:: 3.1.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
f : function
A function that returns the Boolean expression.
Can take one of the following forms:
- Unary ``(x: Column) -> Column: ...``
- Binary ``(x: Column, i: Column) -> Column...``, where the second argument is
a 0-based index of the element.
and can use methods of :class:`pyspark.sql.Column`, functions defined in
:py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
Python ``UserDefinedFunctions`` are not supported
(`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).
Returns
-------
:class:`pyspark.sql.Column`
Examples
--------
>>> df = spark.createDataFrame(
... [(1, ["2018-09-20", "2019-02-03", "2019-07-01", "2020-06-01"])],
... ("key", "values")
... )
>>> def after_second_quarter(x):
... return month(to_date(x)) > 6
>>> df.select(
... filter("values", after_second_quarter).alias("after_second_quarter")
... ).show(truncate=False)
+------------------------+
|after_second_quarter |
+------------------------+
|[2018-09-20, 2019-07-01]|
+------------------------+
"""
return _invoke_higher_order_function("ArrayFilter", [col], [f])
def aggregate(col, zero, merge, finish=None):
"""
Applies a binary operator to an initial state and all elements in the array,
and reduces this to a single state. The final state is converted into the final result
by applying a finish function.
Both functions can use methods of :class:`pyspark.sql.Column`, functions defined in
:py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
Python ``UserDefinedFunctions`` are not supported
(`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).
.. versionadded:: 3.1.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
zero : :class:`Column` or str
initial value. Name of column or expression
merge : function
a binary function ``(acc: Column, x: Column) -> Column...`` returning expression
of the same type as ``zero``
finish : function
an optional unary function ``(x: Column) -> Column: ...``
used to convert accumulated value.
Returns
-------
:class:`pyspark.sql.Column`
Examples
--------
>>> df = spark.createDataFrame([(1, [20.0, 4.0, 2.0, 6.0, 10.0])], ("id", "values"))
>>> df.select(aggregate("values", lit(0.0), lambda acc, x: acc + x).alias("sum")).show()
+----+
| sum|
+----+
|42.0|
+----+
>>> def merge(acc, x):
... count = acc.count + 1
... sum = acc.sum + x
... return struct(count.alias("count"), sum.alias("sum"))
>>> df.select(
... aggregate(
... "values",
... struct(lit(0).alias("count"), lit(0.0).alias("sum")),
... merge,
... lambda acc: acc.sum / acc.count,
... ).alias("mean")
... ).show()
+----+
|mean|
+----+
| 8.4|
+----+
"""
if finish is not None:
return _invoke_higher_order_function(
"ArrayAggregate",
[col, zero],
[merge, finish]
)
else:
return _invoke_higher_order_function(
"ArrayAggregate",
[col, zero],
[merge]
)
def zip_with(col1, col2, f):
"""
Merge two given arrays, element-wise, into a single array using a function.
If one array is shorter, nulls are appended at the end to match the length of the longer
array, before applying the function.
.. versionadded:: 3.1.0
Parameters
----------
col1 : :class:`Column` or str
name of the first column or expression
col2 : :class:`Column` or str
name of the second column or expression
f : function
a binary function ``(x1: Column, x2: Column) -> Column...``
Can use methods of :class:`pyspark.sql.Column`, functions defined in
:py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
Python ``UserDefinedFunctions`` are not supported
(`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).
Returns
-------
:class:`pyspark.sql.Column`
Examples
--------
>>> df = spark.createDataFrame([(1, [1, 3, 5, 8], [0, 2, 4, 6])], ("id", "xs", "ys"))
>>> df.select(zip_with("xs", "ys", lambda x, y: x ** y).alias("powers")).show(truncate=False)
+---------------------------+
|powers |
+---------------------------+
|[1.0, 9.0, 625.0, 262144.0]|
+---------------------------+
>>> df = spark.createDataFrame([(1, ["foo", "bar"], [1, 2, 3])], ("id", "xs", "ys"))
>>> df.select(zip_with("xs", "ys", lambda x, y: concat_ws("_", x, y)).alias("xs_ys")).show()
+-----------------+
| xs_ys|
+-----------------+
|[foo_1, bar_2, 3]|
+-----------------+
"""
return _invoke_higher_order_function("ZipWith", [col1, col2], [f])
def transform_keys(col, f):
"""
Applies a function to every key-value pair in a map and returns
a map with the results of those applications as the new keys for the pairs.
.. versionadded:: 3.1.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
f : function
a binary function ``(k: Column, v: Column) -> Column...``
Can use methods of :class:`pyspark.sql.Column`, functions defined in
:py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
Python ``UserDefinedFunctions`` are not supported
(`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).
Returns
-------
:class:`pyspark.sql.Column`
Examples
--------
>>> df = spark.createDataFrame([(1, {"foo": -2.0, "bar": 2.0})], ("id", "data"))
>>> df.select(transform_keys(
... "data", lambda k, _: upper(k)).alias("data_upper")
... ).show(truncate=False)
+-------------------------+
|data_upper |
+-------------------------+
|{BAR -> 2.0, FOO -> -2.0}|
+-------------------------+
"""
return _invoke_higher_order_function("TransformKeys", [col], [f])
def transform_values(col, f):
"""
Applies a function to every key-value pair in a map and returns
a map with the results of those applications as the new values for the pairs.
.. versionadded:: 3.1.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
f : function
a binary function ``(k: Column, v: Column) -> Column...``
Can use methods of :class:`pyspark.sql.Column`, functions defined in
:py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
Python ``UserDefinedFunctions`` are not supported
(`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).
Returns
-------
:class:`pyspark.sql.Column`
Examples
--------
>>> df = spark.createDataFrame([(1, {"IT": 10.0, "SALES": 2.0, "OPS": 24.0})], ("id", "data"))
>>> df.select(transform_values(
... "data", lambda k, v: when(k.isin("IT", "OPS"), v + 10.0).otherwise(v)
... ).alias("new_data")).show(truncate=False)
+---------------------------------------+
|new_data |
+---------------------------------------+
|{OPS -> 34.0, IT -> 20.0, SALES -> 2.0}|
+---------------------------------------+
"""
return _invoke_higher_order_function("TransformValues", [col], [f])
def map_filter(col, f):
"""
Returns a map whose key-value pairs satisfy a predicate.
.. versionadded:: 3.1.0
Parameters
----------
col : :class:`Column` or str
name of column or expression
f : function
a binary function ``(k: Column, v: Column) -> Column...``
Can use methods of :class:`pyspark.sql.Column`, functions defined in
:py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
Python ``UserDefinedFunctions`` are not supported
(`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).
Returns
-------
:class:`pyspark.sql.Column`
Examples
--------
>>> df = spark.createDataFrame([(1, {"foo": 42.0, "bar": 1.0, "baz": 32.0})], ("id", "data"))
>>> df.select(map_filter(
... "data", lambda _, v: v > 30.0).alias("data_filtered")
... ).show(truncate=False)
+--------------------------+
|data_filtered |
+--------------------------+
|{baz -> 32.0, foo -> 42.0}|
+--------------------------+
"""
return _invoke_higher_order_function("MapFilter", [col], [f])
def map_zip_with(col1, col2, f):
"""
Merge two given maps, key-wise into a single map using a function.
.. versionadded:: 3.1.0
Parameters
----------
col1 : :class:`Column` or str
name of the first column or expression
col2 : :class:`Column` or str
name of the second column or expression
f : function
a ternary function ``(k: Column, v1: Column, v2: Column) -> Column...``
Can use methods of :class:`pyspark.sql.Column`, functions defined in
:py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
Python ``UserDefinedFunctions`` are not supported
(`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).
Returns
-------
:class:`pyspark.sql.Column`
Examples
--------
>>> df = spark.createDataFrame([
... (1, {"IT": 24.0, "SALES": 12.00}, {"IT": 2.0, "SALES": 1.4})],
... ("id", "base", "ratio")
... )
>>> df.select(map_zip_with(
... "base", "ratio", lambda k, v1, v2: round(v1 * v2, 2)).alias("updated_data")
... ).show(truncate=False)
+---------------------------+
|updated_data |
+---------------------------+
|{SALES -> 16.8, IT -> 48.0}|
+---------------------------+
"""
return _invoke_higher_order_function("MapZipWith", [col1, col2], [f])
# ---------------------- Partition transform functions --------------------------------
def years(col):
"""
Partition transform function: A transform for timestamps and dates
to partition data into years.
.. versionadded:: 3.1.0
Examples
--------
>>> df.writeTo("catalog.db.table").partitionedBy( # doctest: +SKIP
... years("ts")
... ).createOrReplace()
Notes
-----
    This function can be used only in combination with
:py:meth:`~pyspark.sql.readwriter.DataFrameWriterV2.partitionedBy`
method of the `DataFrameWriterV2`.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.years(_to_java_column(col)))
def months(col):
"""
Partition transform function: A transform for timestamps and dates
to partition data into months.
.. versionadded:: 3.1.0
Examples
--------
>>> df.writeTo("catalog.db.table").partitionedBy(
... months("ts")
... ).createOrReplace() # doctest: +SKIP
Notes
-----
    This function can be used only in combination with
:py:meth:`~pyspark.sql.readwriter.DataFrameWriterV2.partitionedBy`
method of the `DataFrameWriterV2`.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.months(_to_java_column(col)))
def days(col):
"""
Partition transform function: A transform for timestamps and dates
to partition data into days.
.. versionadded:: 3.1.0
Examples
--------
>>> df.writeTo("catalog.db.table").partitionedBy( # doctest: +SKIP
... days("ts")
... ).createOrReplace()
Notes
-----
    This function can be used only in combination with
:py:meth:`~pyspark.sql.readwriter.DataFrameWriterV2.partitionedBy`
method of the `DataFrameWriterV2`.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.days(_to_java_column(col)))
def hours(col):
"""
Partition transform function: A transform for timestamps
to partition data into hours.
.. versionadded:: 3.1.0
Examples
--------
>>> df.writeTo("catalog.db.table").partitionedBy( # doctest: +SKIP
... hours("ts")
... ).createOrReplace()
Notes
-----
    This function can be used only in combination with
:py:meth:`~pyspark.sql.readwriter.DataFrameWriterV2.partitionedBy`
method of the `DataFrameWriterV2`.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.hours(_to_java_column(col)))
def bucket(numBuckets, col):
"""
Partition transform function: A transform for any type that partitions
by a hash of the input column.
.. versionadded:: 3.1.0
Examples
--------
>>> df.writeTo("catalog.db.table").partitionedBy( # doctest: +SKIP
... bucket(42, "ts")
... ).createOrReplace()
Notes
-----
This function can be used only in combination with
:py:meth:`~pyspark.sql.readwriter.DataFrameWriterV2.partitionedBy`
method of the `DataFrameWriterV2`.
"""
if not isinstance(numBuckets, (int, Column)):
raise TypeError(
"numBuckets should be a Column or an int, got {}".format(type(numBuckets))
)
sc = SparkContext._active_spark_context
numBuckets = (
_create_column_from_literal(numBuckets)
if isinstance(numBuckets, int)
else _to_java_column(numBuckets)
)
return Column(sc._jvm.functions.bucket(numBuckets, _to_java_column(col)))
# ---------------------------- User Defined Function ----------------------------------
def udf(f=None, returnType=StringType()):
"""Creates a user defined function (UDF).
.. versionadded:: 1.3.0
Parameters
----------
f : function
python function if used as a standalone function
returnType : :class:`pyspark.sql.types.DataType` or str
the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
Examples
--------
>>> from pyspark.sql.types import IntegerType
>>> slen = udf(lambda s: len(s), IntegerType())
>>> @udf
... def to_upper(s):
... if s is not None:
... return s.upper()
...
>>> @udf(returnType=IntegerType())
... def add_one(x):
... if x is not None:
... return x + 1
...
>>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age"))
>>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show()
+----------+--------------+------------+
|slen(name)|to_upper(name)|add_one(age)|
+----------+--------------+------------+
| 8| JOHN DOE| 22|
+----------+--------------+------------+
Notes
-----
The user-defined functions are considered deterministic by default. Due to
optimization, duplicate invocations may be eliminated or the function may even be invoked
more times than it is present in the query. If your function is not deterministic, call
`asNondeterministic` on the user defined function. E.g.:
>>> from pyspark.sql.types import IntegerType
>>> import random
>>> random_udf = udf(lambda: int(random.random() * 100), IntegerType()).asNondeterministic()
    The user-defined functions do not support conditional expressions or short-circuiting
    in boolean expressions; they end up being executed fully in all cases. If the functions
    can fail on special rows, the workaround is to incorporate the condition into the functions.
The user-defined functions do not take keyword arguments on the calling side.
"""
# The following table shows most of Python data and SQL type conversions in normal UDFs that
# are not yet visible to the user. Some of behaviors are buggy and might be changed in the near
# future. The table might have to be eventually documented externally.
# Please see SPARK-28131's PR to see the codes in order to generate the table below.
#
# +-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+ # noqa
# |SQL Type \ Python Value(Type)|None(NoneType)|True(bool)|1(int)| a(str)| 1970-01-01(date)|1970-01-01 00:00:00(datetime)|1.0(float)|array('i', [1])(array)|[1](list)| (1,)(tuple)|bytearray(b'ABC')(bytearray)| 1(Decimal)|{'a': 1}(dict)|Row(kwargs=1)(Row)|Row(namedtuple=1)(Row)| # noqa
# +-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+ # noqa
# | boolean| None| True| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | tinyint| None| None| 1| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | smallint| None| None| 1| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | int| None| None| 1| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | bigint| None| None| 1| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | string| None| 'true'| '1'| 'a'|'java.util.Gregor...| 'java.util.Gregor...| '1.0'| '[I@66cbb73a'| '[1]'|'[Ljava.lang.Obje...| '[B@5a51eb1a'| '1'| '{a=1}'| X| X| # noqa
# | date| None| X| X| X|datetime.date(197...| datetime.date(197...| X| X| X| X| X| X| X| X| X| # noqa
# | timestamp| None| X| X| X| X| datetime.datetime...| X| X| X| X| X| X| X| X| X| # noqa
# | float| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa
# | double| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa
# | array<int>| None| None| None| None| None| None| None| [1]| [1]| [1]| [65, 66, 67]| None| None| X| X| # noqa
# | binary| None| None| None|bytearray(b'a')| None| None| None| None| None| None| bytearray(b'ABC')| None| None| X| X| # noqa
# | decimal(10,0)| None| None| None| None| None| None| None| None| None| None| None|Decimal('1')| None| X| X| # noqa
# | map<string,int>| None| None| None| None| None| None| None| None| None| None| None| None| {'a': 1}| X| X| # noqa
# | struct<_1:int>| None| X| X| X| X| X| X| X|Row(_1=1)| Row(_1=1)| X| X| Row(_1=None)| Row(_1=1)| Row(_1=1)| # noqa
# +-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+ # noqa
#
# Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be
# used in `returnType`.
# Note: The values inside of the table are generated by `repr`.
# Note: 'X' means it throws an exception during the conversion.
# Note: Python 3.7.3 is used.
# decorator @udf, @udf(), @udf(dataType())
if f is None or isinstance(f, (str, DataType)):
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
return functools.partial(_create_udf, returnType=return_type,
evalType=PythonEvalType.SQL_BATCHED_UDF)
else:
return _create_udf(f=f, returnType=returnType,
evalType=PythonEvalType.SQL_BATCHED_UDF)
def _test():
import doctest
from pyspark.sql import Row, SparkSession
import pyspark.sql.functions
globs = pyspark.sql.functions.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.functions tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = spark.createDataFrame([Row(age=2, name='Alice'), Row(age=5, name='Bob')])
(failure_count, test_count) = doctest.testmod(
pyspark.sql.functions, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
kmike/scikit-learn | sklearn/neighbors/graph.py | 14 | 2839 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD, (C) INRIA, University of Amsterdam
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def kneighbors_graph(X, n_neighbors, mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.todense()
matrix([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors).fit(X)
return X.kneighbors_graph(X._fit_X, n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
    Neighborhoods are restricted to the points at a distance lower than
radius.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.todense()
matrix([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius).fit(X)
return X.radius_neighbors_graph(X._fit_X, radius, mode)
| bsd-3-clause |
louispotok/pandas | pandas/tests/extension/base/__init__.py | 1 | 1836 | """Base test suite for extension arrays.
These tests are intended for third-party libraries to subclass to validate
that their extension arrays and dtypes satisfy the interface. Moving or
renaming the tests should not be done lightly.
Libraries are expected to implement a few pytest fixtures to provide data
for the tests. The fixtures may be located in either
* The same module as your test class.
* A ``conftest.py`` in the same directory as your test class.
The full list of fixtures may be found in the ``conftest.py`` next to this
file.
.. code-block:: python
import pytest
from pandas.tests.extension.base import BaseDtypeTests
@pytest.fixture
def dtype():
return MyDtype()
class TestMyDtype(BaseDtypeTests):
pass
Your class ``TestMyDtype`` will inherit all the tests defined on
``BaseDtypeTests``. pytest's fixture discovery will supply your ``dtype``
wherever the test requires it. You're free to implement additional tests.
All the tests in these modules use ``self.assert_frame_equal`` or
``self.assert_series_equal`` for dataframe or series comparisons. By default,
they use the usual ``pandas.testing.assert_frame_equal`` and
``pandas.testing.assert_series_equal``. You can override the checks used
by defining the staticmethods ``assert_frame_equal`` and
``assert_series_equal`` on your base test class.
"""
from .casting import BaseCastingTests # noqa
from .constructors import BaseConstructorsTests # noqa
from .dtype import BaseDtypeTests # noqa
from .getitem import BaseGetitemTests # noqa
from .groupby import BaseGroupbyTests # noqa
from .interface import BaseInterfaceTests # noqa
from .methods import BaseMethodsTests # noqa
from .missing import BaseMissingTests # noqa
from .reshaping import BaseReshapingTests # noqa
from .setitem import BaseSetitemTests # noqa
| bsd-3-clause |
jaidevd/scikit-learn | sklearn/datasets/tests/test_base.py | 13 | 8907 | import os
import shutil
import tempfile
import warnings
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import with_setup
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
    # clear_data_home will delete both the content and the folder itself
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
# test return_X_y option
X_y_tuple = load_digits(return_X_y=True)
bunch = load_digits()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
assert_equal(len(res.feature_names), 10)
# test return_X_y option
X_y_tuple = load_diabetes(return_X_y=True)
bunch = load_diabetes()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_linnerud(return_X_y=True)
bunch = load_linnerud()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_iris(return_X_y=True)
bunch = load_iris()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_breast_cancer():
res = load_breast_cancer()
assert_equal(res.data.shape, (569, 30))
assert_equal(res.target.size, 569)
assert_equal(res.target_names.size, 2)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_breast_cancer(return_X_y=True)
bunch = load_breast_cancer()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_boston(return_X_y=True)
bunch = load_boston()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
bunch = Bunch(key='original')
# This reproduces a problem when Bunch pickles have been created
# with scikit-learn 0.16 and are read with 0.17. Basically there
    # is a surprising behaviour because reading bunch.key uses
# bunch.__dict__ (which is non empty for 0.16 Bunch objects)
# whereas assigning into bunch.key uses bunch.__setattr__. See
# https://github.com/scikit-learn/scikit-learn/issues/6196 for
# more details
bunch.__dict__['key'] = 'set from __dict__'
bunch_from_pkl = loads(dumps(bunch))
# After loading from pickle the __dict__ should have been ignored
assert_equal(bunch_from_pkl.key, 'original')
assert_equal(bunch_from_pkl['key'], 'original')
# Making sure that changing the attr does change the value
# associated with __getitem__ as well
bunch_from_pkl.key = 'changed'
assert_equal(bunch_from_pkl.key, 'changed')
assert_equal(bunch_from_pkl['key'], 'changed')
def test_bunch_dir():
# check that dir (important for autocomplete) shows attributes
data = load_iris()
assert_true("data" in dir(data))
| bsd-3-clause |
kod3r/sklearn-pmml | sklearn_pmml/convert/tree.py | 3 | 6029 | from functools import partial
from sklearn.base import ClassifierMixin, RegressorMixin
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.tree._tree import Tree, TREE_LEAF
import numpy as np
from sklearn_pmml.convert.model import EstimatorConverter
from sklearn_pmml.convert.features import Feature, CategoricalFeature, NumericFeature
import sklearn_pmml.pmml as pmml
from sklearn_pmml.convert.utils import estimator_to_converter
class DecisionTreeConverter(EstimatorConverter):
SPLIT_BINARY = 'binarySplit'
OPERATOR_LE = 'lessOrEqual'
NODE_ROOT = 0
OUTPUT_PROBABILITY = 'proba'
    OUTPUT_LABEL = 'label'
def __init__(self, estimator, context, mode):
super(DecisionTreeConverter, self).__init__(estimator, context, mode)
assert len(self.context.schemas[self.SCHEMA_OUTPUT]) == 1, 'Only one-label trees are supported'
assert hasattr(estimator, 'tree_'), 'Estimator has no tree_ attribute'
if mode == self.MODE_CLASSIFICATION:
if isinstance(self.context.schemas[self.SCHEMA_OUTPUT][0], CategoricalFeature):
self.prediction_output = self.OUTPUT_LABEL
else:
self.prediction_output = self.OUTPUT_PROBABILITY
assert isinstance(self.estimator, ClassifierMixin), \
'Only a classifier can be serialized in classification mode'
if mode == self.MODE_REGRESSION:
assert isinstance(self.context.schemas[self.SCHEMA_OUTPUT][0], NumericFeature), \
'Only a numeric feature can be an output of regression'
assert isinstance(self.estimator, RegressorMixin), \
'Only a regressor can be serialized in regression mode'
assert estimator.tree_.value.shape[1] == len(self.context.schemas[self.SCHEMA_OUTPUT]), \
'Tree outputs {} results while the schema specifies {} output fields'.format(
estimator.tree_.value.shape[1], len(self.context.schemas[self.SCHEMA_OUTPUT]))
def _model(self):
assert self.SCHEMA_NUMERIC in self.context.schemas, \
'Either build transformation dictionary or provide {} schema in context'.format(self.SCHEMA_NUMERIC)
tm = pmml.TreeModel(functionName=self.model_function_name, splitCharacteristic=self.SPLIT_BINARY)
tm.append(self.mining_schema())
tm.append(self.output())
tm.Node = self._transform_node(
self.estimator.tree_,
self.NODE_ROOT,
self.context.schemas[self.SCHEMA_NUMERIC],
self.context.schemas[self.SCHEMA_OUTPUT][0]
)
return tm
def model(self, verification_data=None):
assert self.SCHEMA_NUMERIC in self.context.schemas, \
'Either build transformation dictionary or provide {} schema in context'.format(self.SCHEMA_NUMERIC)
tm = self._model()
if verification_data is not None:
tm.append(self.model_verification(verification_data))
return tm
def _transform_node(self, tree, index, input_schema, output_feature, enter_condition=None):
"""
Recursive mapping of sklearn Tree into PMML Node tree
:return: Node element
"""
assert isinstance(tree, Tree)
assert isinstance(input_schema, list)
assert isinstance(output_feature, Feature)
node = pmml.Node()
if enter_condition is None:
node.append(pmml.True_())
else:
node.append(enter_condition)
node.recordCount = tree.n_node_samples[index]
if tree.children_left[index] != TREE_LEAF:
feature = input_schema[tree.feature[index]]
assert isinstance(feature, Feature)
left_child = self._transform_node(
tree,
tree.children_left[index],
input_schema,
output_feature,
enter_condition=pmml.SimplePredicate(
field=feature.full_name, operator=DecisionTreeConverter.OPERATOR_LE, value_=tree.threshold[index]
)
)
right_child = self._transform_node(tree, tree.children_right[index], input_schema, output_feature)
if self.model_function_name == self.MODE_CLASSIFICATION:
score, score_prob = None, 0.0
for i in range(len(tree.value[index][0])):
left_score = left_child.ScoreDistribution[i]
right_score = right_child.ScoreDistribution[i]
prob = float(left_score.recordCount + right_score.recordCount) / node.recordCount
node.append(pmml.ScoreDistribution(
recordCount=left_score.recordCount + right_score.recordCount,
value_=left_score.value_,
confidence=prob
))
if score_prob < prob:
score, score_prob = left_score.value_, prob
node.score = score
node.append(left_child).append(right_child)
else:
node_value = np.array(tree.value[index][0])
if self.model_function_name == self.MODE_CLASSIFICATION:
probs = node_value / float(node_value.sum())
for i in range(len(probs)):
node.append(pmml.ScoreDistribution(
confidence=probs[i],
recordCount=node_value[i],
value_=output_feature.from_number(i)
))
node.score = output_feature.from_number(probs.argmax())
elif self.model_function_name == self.MODE_REGRESSION:
node.score = node_value[0]
return node
estimator_to_converter[DecisionTreeClassifier] = partial(
DecisionTreeConverter, mode=DecisionTreeConverter.MODE_CLASSIFICATION
)
estimator_to_converter[DecisionTreeRegressor] = partial(
DecisionTreeConverter, mode=DecisionTreeConverter.MODE_REGRESSION
) | mit |
lekshmideepu/nest-simulator | pynest/examples/gif_population.py | 8 | 5045 | # -*- coding: utf-8 -*-
#
# gif_population.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Population of GIF neuron model with oscillatory behavior
--------------------------------------------------------
This script simulates a population of generalized integrate-and-fire (GIF)
model neurons driven by noise from a group of Poisson generators.
Due to spike-frequency adaptation, the GIF neurons tend to show oscillatory
behavior on a time scale comparable with the time constants of the adaptation
elements (stc and sfa).
Population dynamics are visualized by a raster plot and as the average firing rate.
References
~~~~~~~~~~
.. [1] Schwalger T, Degert M, Gerstner W (2017). Towards a theory of cortical columns: From spiking
neurons to interacting neural populations of finite size. PLoS Comput Biol.
https://doi.org/10.1371/journal.pcbi.1005507
.. [2] Mensi S, Naud R, Pozzorini C, Avermann M, Petersen CC and
Gerstner W (2012). Parameter extraction and classification of
three cortical neuron types reveals two distinct adaptation
mechanisms. Journal of Neurophysiology. 107(6), pp.1756-1775.
"""
###############################################################################
# Import all necessary modules for simulation and plotting.
import nest
import nest.raster_plot
import matplotlib.pyplot as plt
nest.ResetKernel()
###############################################################################
# Assigning the simulation parameters to variables.
dt = 0.1
simtime = 2000.0
###############################################################################
# Definition of neural parameters for the GIF model. These parameters are
# extracted by fitting the model to experimental data [2]_.
neuron_params = {"C_m": 83.1,
"g_L": 3.7,
"E_L": -67.0,
"Delta_V": 1.4,
"V_T_star": -39.6,
"t_ref": 4.0,
"V_reset": -36.7,
"lambda_0": 1.0,
"q_stc": [56.7, -6.9],
"tau_stc": [57.8, 218.2],
"q_sfa": [11.7, 1.8],
"tau_sfa": [53.8, 640.0],
"tau_syn_ex": 10.0,
}
###############################################################################
# Definition of the parameters for the population of GIF neurons.
N_ex = 100 # size of the population
p_ex = 0.3 # connection probability inside the population
w_ex = 30.0 # synaptic weights inside the population (pA)
###############################################################################
# Definition of the parameters for the Poisson group and its connection with
# GIF neurons population.
N_noise = 50 # size of Poisson group
rate_noise = 10.0 # firing rate of Poisson neurons (Hz)
w_noise = 20.0 # synaptic weights from Poisson to population neurons (pA)
###############################################################################
# Configuration of the simulation kernel with the previously defined time
# resolution.
nest.SetKernelStatus({"resolution": dt})
###############################################################################
# Building a population of GIF neurons, a group of Poisson neurons and a
# spike recorder device for capturing spike times of the population.
population = nest.Create("gif_psc_exp", N_ex, params=neuron_params)
noise = nest.Create("poisson_generator", N_noise, params={'rate': rate_noise})
spike_det = nest.Create("spike_recorder")
###############################################################################
# Build connections inside the population of GIF neurons population, between
# Poisson group and the population, and also connecting spike recorder to
# the population.
nest.Connect(
population, population, {'rule': 'pairwise_bernoulli', 'p': p_ex},
syn_spec={"weight": w_ex}
)
nest.Connect(noise, population, 'all_to_all', syn_spec={"weight": w_noise})
nest.Connect(population, spike_det)
###############################################################################
# Simulation of the network.
nest.Simulate(simtime)
###############################################################################
# Plotting the results of simulation including raster plot and histogram of
# population activity.
nest.raster_plot.from_device(spike_det, hist=True)
plt.title('Population dynamics')
plt.show()
| gpl-2.0 |
icdishb/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 14 | 8137 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed, [])
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
glenngillen/dotfiles | .vscode/extensions/ms-python.python-2021.5.842923320/pythonFiles/lib/python/debugpy/_vendored/pydevd/pydev_ipython/matplotlibtools.py | 1 | 5591 |
import sys
from _pydev_bundle import pydev_log
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'wx': 'WXAgg',
'qt': 'Qt4Agg', # qt3 not supported
'qt4': 'Qt4Agg',
'qt5': 'Qt5Agg',
'osx': 'MacOSX'}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
backend2gui['Qt4Agg'] = 'qt4'
backend2gui['Qt5Agg'] = 'qt5'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
def do_enable_gui(guiname):
from _pydev_bundle.pydev_versioncheck import versionok_for_gui
if versionok_for_gui():
try:
from pydev_ipython.inputhook import enable_gui
enable_gui(guiname)
except:
sys.stderr.write("Failed to enable GUI event loop integration for '%s'\n" % guiname)
pydev_log.exception()
elif guiname not in ['none', '', None]:
# Only print a warning if the guiname was going to do something
sys.stderr.write("Debug console: Python version does not support GUI event loop integration for '%s'\n" % guiname)
# Return value does not matter, so return back what was sent
return guiname
def find_gui_and_backend():
"""Return the gui and mpl backend."""
matplotlib = sys.modules['matplotlib']
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParams['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
return gui, backend
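# Hedged usage sketch (added for illustration): find_gui_and_backend() reads the
# backend from the matplotlib module already present in sys.modules, so
# matplotlib must be imported first. Concrete return values depend on the local
# matplotlib configuration (e.g. backend 'TkAgg' maps to gui 'tk' above).
def _example_find_gui_and_backend():
    import matplotlib  # noqa: F401 -- ensures 'matplotlib' is in sys.modules
    return find_gui_and_backend()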
def is_interactive_backend(backend):
""" Check if backend is interactive """
matplotlib = sys.modules['matplotlib']
from matplotlib.rcsetup import interactive_bk, non_interactive_bk # @UnresolvedImport
if backend in interactive_bk:
return True
elif backend in non_interactive_bk:
return False
else:
return matplotlib.is_interactive()
def patch_use(enable_gui_function):
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_use(*args, **kwargs):
matplotlib.real_use(*args, **kwargs)
gui, backend = find_gui_and_backend()
enable_gui_function(gui)
matplotlib.real_use = matplotlib.use
matplotlib.use = patched_use
def patch_is_interactive():
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_is_interactive():
return matplotlib.rcParams['interactive']
matplotlib.real_is_interactive = matplotlib.is_interactive
matplotlib.is_interactive = patched_is_interactive
def activate_matplotlib(enable_gui_function):
"""Set interactive to True for interactive backends.
enable_gui_function - Function which enables gui, should be run in the main thread.
"""
matplotlib = sys.modules['matplotlib']
gui, backend = find_gui_and_backend()
is_interactive = is_interactive_backend(backend)
if is_interactive:
enable_gui_function(gui)
if not matplotlib.is_interactive():
sys.stdout.write("Backend %s is interactive backend. Turning interactive mode on.\n" % backend)
matplotlib.interactive(True)
else:
if matplotlib.is_interactive():
sys.stdout.write("Backend %s is non-interactive backend. Turning interactive mode off.\n" % backend)
matplotlib.interactive(False)
patch_use(enable_gui_function)
patch_is_interactive()
def flag_calls(func):
"""Wrap a function to detect and flag when it gets called.
This is a decorator which takes a function and wraps it in a function with
a 'called' attribute. wrapper.called is initialized to False.
The wrapper.called attribute is set to False right before each call to the
wrapped function, so if the call fails it remains False. After the call
completes, wrapper.called is set to True and the output is returned.
Testing for truth in wrapper.called allows you to determine if a call to
func() was attempted and succeeded."""
# don't wrap twice
if hasattr(func, 'called'):
return func
def wrapper(*args, **kw):
wrapper.called = False
out = func(*args, **kw)
wrapper.called = True
return out
wrapper.called = False
wrapper.__doc__ = func.__doc__
return wrapper
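# Hedged illustration (not part of the original module): demonstrates the
# 'called' flag that flag_calls attaches to a wrapped function.
def _example_flag_calls():
    wrapped = flag_calls(len)
    before = wrapped.called      # False: no call attempted yet
    length = wrapped([1, 2, 3])  # 3
    after = wrapped.called       # True: the call completed successfully
    return before, length, after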
def activate_pylab():
pylab = sys.modules['pylab']
pylab.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive)
def activate_pyplot():
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pyplot.draw_if_interactive = flag_calls(pyplot.draw_if_interactive)
| mit |
HyukjinKwon/spark | python/pyspark/pandas/tests/indexes/test_category.py | 15 | 4626 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import pandas as pd
from pandas.api.types import CategoricalDtype
import pyspark.pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class CategoricalIndexTest(PandasOnSparkTestCase, TestUtils):
def test_categorical_index(self):
pidx = pd.CategoricalIndex([1, 2, 3])
psidx = ps.CategoricalIndex([1, 2, 3])
self.assert_eq(psidx, pidx)
self.assert_eq(psidx.categories, pidx.categories)
self.assert_eq(psidx.codes, pd.Index(pidx.codes))
self.assert_eq(psidx.ordered, pidx.ordered)
pidx = pd.Index([1, 2, 3], dtype="category")
psidx = ps.Index([1, 2, 3], dtype="category")
self.assert_eq(psidx, pidx)
self.assert_eq(psidx.categories, pidx.categories)
self.assert_eq(psidx.codes, pd.Index(pidx.codes))
self.assert_eq(psidx.ordered, pidx.ordered)
pdf = pd.DataFrame(
{
"a": pd.Categorical([1, 2, 3, 1, 2, 3]),
"b": pd.Categorical(["a", "b", "c", "a", "b", "c"], categories=["c", "b", "a"]),
},
index=pd.Categorical([10, 20, 30, 20, 30, 10], categories=[30, 10, 20], ordered=True),
)
psdf = ps.from_pandas(pdf)
pidx = pdf.set_index("b").index
psidx = psdf.set_index("b").index
self.assert_eq(psidx, pidx)
self.assert_eq(psidx.categories, pidx.categories)
self.assert_eq(psidx.codes, pd.Index(pidx.codes))
self.assert_eq(psidx.ordered, pidx.ordered)
pidx = pdf.set_index(["a", "b"]).index.get_level_values(0)
psidx = psdf.set_index(["a", "b"]).index.get_level_values(0)
self.assert_eq(psidx, pidx)
self.assert_eq(psidx.categories, pidx.categories)
self.assert_eq(psidx.codes, pd.Index(pidx.codes))
self.assert_eq(psidx.ordered, pidx.ordered)
def test_astype(self):
pidx = pd.Index(["a", "b", "c"])
psidx = ps.from_pandas(pidx)
self.assert_eq(psidx.astype("category"), pidx.astype("category"))
self.assert_eq(
psidx.astype(CategoricalDtype(["c", "a", "b"])),
pidx.astype(CategoricalDtype(["c", "a", "b"])),
)
pcidx = pidx.astype(CategoricalDtype(["c", "a", "b"]))
kcidx = psidx.astype(CategoricalDtype(["c", "a", "b"]))
self.assert_eq(kcidx.astype("category"), pcidx.astype("category"))
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
kcidx.astype(CategoricalDtype(["b", "c", "a"])),
pcidx.astype(CategoricalDtype(["b", "c", "a"])),
)
else:
self.assert_eq(
kcidx.astype(CategoricalDtype(["b", "c", "a"])),
pidx.astype(CategoricalDtype(["b", "c", "a"])),
)
self.assert_eq(kcidx.astype(str), pcidx.astype(str))
def test_factorize(self):
pidx = pd.CategoricalIndex([1, 2, 3, None])
psidx = ps.from_pandas(pidx)
pcodes, puniques = pidx.factorize()
kcodes, kuniques = psidx.factorize()
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
pcodes, puniques = pidx.factorize(na_sentinel=-2)
kcodes, kuniques = psidx.factorize(na_sentinel=-2)
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.indexes.test_category import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
phev8/ward-metrics | wardmetrics/utils.py | 1 | 17734 | # TODO: implement event creation out of frame based results
# TODO: implement wrappers for pandas row based events
# TODO: wrapper for multiclass case
def frame_results_to_events(frame_results, frame_times=None):
"""
Converting frame-by-frame results into a list of events for each label. If the classifier predicts the same label in a sequence it is considers as an event. (Filtering too short events, or merge close-by events is not included here.)
Arguments:
frame_results (list or numpy array): list of frame class labels in an temporal order (sequential). It can contain numeric or string values e.g. ``[1, 1, 0, ...]`` or ``['class_1', 'class_1', 'class_0', ...]``
frame_times (list or numpy array): list of timestamps (preferably as numeric values e.g. posix time) for each frame.
Returns:
        dictionary: list of events (tuple of start and end times/indexes) for each unique label found in the frame_results. Event start value is the first occurrence of the label, event end is the time or index of the next frame after the event (also the start of the next event).
"""
if len(frame_results) < 2:
raise ValueError("frame_results has to contain at least 2 items.")
if frame_times is not None and len(frame_results) != len(frame_times):
raise ValueError("Length of frame_results and frame_times has to be equal.")
unique_labels = set(frame_results)
# Init event lists for each label:
results = {}
for l in unique_labels:
results[str(l)] = []
event_label = frame_results[0]
event_start_index = 0
for index in range(1, len(frame_results)):
if frame_results[index] != event_label:
# close event:
if frame_times is None:
results[str(event_label)].append((event_start_index, index))
else:
results[str(event_label)].append((frame_times[event_start_index], frame_times[index]))
# start new event:
event_label = frame_results[index]
event_start_index = index
# close last event:
if frame_times is None:
results[str(event_label)].append((event_start_index, index+1))
else:
results[str(event_label)].append((frame_times[event_start_index], frame_times[-1] + float(frame_times[-1] - frame_times[0])/(len(frame_times)-1) ))
return results
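# Hedged usage sketch (added for illustration, not part of the original API):
# consecutive frames carrying the same label are grouped into one event per label.
def _example_frame_results_to_events():
    frames = [1, 1, 0, 0, 0, 1]
    times = [0.0, 0.5, 1.0, 1.5, 2.0, 2.5]
    # Expected: {'1': [(0.0, 1.0), (2.5, 3.0)], '0': [(1.0, 2.5)]}
    # (the last event is closed one average frame step after the final timestamp)
    return frame_results_to_events(frames, times)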
def print_standard_event_metrics(standard_event_results):
"""
Print standard precision and recall values
Examples:
>>> print_standard_event_metrics(test_r)
Standard event results:
precision:\t0.8888888\tWeighted by length: 0.9186991
recall:\t0.3333333\tWeighted by length: 0.2230576
"""
print("Standard event results:")
print("\tprecision:\t" + str(standard_event_results["precision"]) + "\tWeighted by length:\t" + str(standard_event_results["precision (weighted)"]))
print("\trecall:\t\t" + str(standard_event_results["recall"]) + "\tWeighted by length:\t" + str(standard_event_results["recall (weighted)"]))
def standard_event_metrics_to_list(standard_event_results):
""" Converting standard event metric results to a list (position of each item is fixed)
Argument:
standard_event_results (dictionary): as provided by the 4th item in the results of eval_events function
Returns:
        list: Item order: 1. Precision, 2. Recall, 3. Length weighted precision, 4. Length weighted recall
"""
return [
standard_event_results["precision"],
standard_event_results["recall"],
standard_event_results["precision (weighted)"],
standard_event_results["recall (weighted)"]]
def standard_event_metrics_to_string(standard_event_results, separator=", ", prefix="[", suffix="]"):
""" Converting standard event metric results to a string
Argument:
standard_event_results (dictionary): as provided by the 4th item in the results of eval_events function
Keyword Arguments:
separator (str): characters between each item
prefix (str): string that will be added before the line
suffix (str): string that will be added to the end of the line
Returns:
str: Item order: 1. Precision, 2. Recall 3. Length weighted precision, 4. Length weighted recall
Examples:
>>> standard_event_metrics_to_string(test_r)
[0.88888, 0.33333, 0.918, 0.22305]
>>> standard_event_metrics_to_string(test_r, separator="\t", prefix="/", suffix="/")
/0.88888\t0.33333\t0.918\t0.22305/
>>> standard_event_metrics_to_string(test_r, prefix="", suffix="\\n")
0.88888, 0.33333, 0.918, 0.22305\\n
"""
return prefix + separator.join(map(str, standard_event_metrics_to_list(standard_event_results))) + suffix
def print_detailed_event_metrics(detailed_event_results):
"""
Print totals for each event category
Example:
>>> print_detailed_event_metrics(test_r)
Detailed event results:
Actual events:
deletions:\t\t1 12.50% of actual events
merged:\t\t3 37.50% of actual events
fragmented:\t\t1 12.50% of actual events
frag. and merged:\t1 12.50% of actual events
correct:\t\t2 25.00% of actual events
Detected events:
insertions:\t\t1 11.11% of detected events
merging:\t\t1 11.11% of detected events
fragmenting:\t\t4 44.44% of detected events
frag. and merging:\t1 11.11% of detected events
correct:\t\t2 22.22% of detected events
"""
print("Detailed event results:")
print("\tActual events:")
print("\t\tdeletions:\t\t\t" + str(detailed_event_results["D"]) + "\t" + "{0:.2f}".format(detailed_event_results["D"]*100/detailed_event_results["total_gt"]) + "% of actual events")
print("\t\tmerged:\t\t\t\t" + str(detailed_event_results["M"]) + "\t" + "{0:.2f}".format(detailed_event_results["M"]*100/detailed_event_results["total_gt"]) + "% of actual events")
print("\t\tfragmented:\t\t\t" + str(detailed_event_results["F"]) + "\t" + "{0:.2f}".format(detailed_event_results["F"]*100/detailed_event_results["total_gt"]) + "% of actual events")
print("\t\tfrag. and merged:\t" + str(detailed_event_results["FM"]) + "\t" + "{0:.2f}".format(detailed_event_results["FM"]*100/detailed_event_results["total_gt"]) + "% of actual events")
print("\t\tcorrect:\t\t\t" + str(detailed_event_results["C"]) + "\t" + "{0:.2f}".format(detailed_event_results["C"]*100/detailed_event_results["total_gt"]) + "% of actual events")
print("\tDetected events:")
print("\t\tinsertions:\t\t\t" + str(detailed_event_results["I'"]) + "\t" + "{0:.2f}".format(detailed_event_results["I'"]*100/detailed_event_results["total_det"]) + "% of detected events")
print("\t\tmerging:\t\t\t" + str(detailed_event_results["M'"]) + "\t" + "{0:.2f}".format(detailed_event_results["M'"]*100/detailed_event_results["total_det"]) + "% of detected events")
print("\t\tfragmenting:\t\t" + str(detailed_event_results["F'"]) + "\t" + "{0:.2f}".format(detailed_event_results["F'"]*100/detailed_event_results["total_det"]) + "% of detected events")
print("\t\tfrag. and merging:\t" + str(detailed_event_results["FM'"]) + "\t" + "{0:.2f}".format(detailed_event_results["FM'"]*100/detailed_event_results["total_det"]) + "% of detected events")
print("\t\tcorrect:\t\t\t" + str(detailed_event_results["C"]) + "\t" + "{0:.2f}".format(detailed_event_results["C"]*100/detailed_event_results["total_det"]) + "% of detected events")
#print("\trecall:\t\t" + str(standard_event_results["recall"]) + "\tWeighted by length:\t" + str(standard_event_results["recall (weighted)"]))
def detailed_event_metrics_to_list(detailed_event_results):
""" Converting detailed event metric results to a list (position of each item is fixed)
Argument:
detailed_event_results (dictionary): as provided by the 3rd item in the results of eval_events function
Returns:
list: Item order: 0. correct, 1. deletions 2. merged, 3. fragmented, 4. fragmented and merged, 5. fragmenting, 6. merging, 7. fragmenting and merging, 8. insertions, 9. total of actual events, 10. total of detected events
"""
return [
detailed_event_results["C"],
detailed_event_results["D"],
detailed_event_results["M"],
detailed_event_results["F"],
detailed_event_results["FM"],
detailed_event_results["F'"],
detailed_event_results["M'"],
detailed_event_results["FM'"],
detailed_event_results["I'"],
detailed_event_results["total_gt"],
detailed_event_results["total_det"],
]
def detailed_event_metrics_to_string(detailed_event_results, separator=", ", prefix="[", suffix="]"):
""" Converting detailed event metric results to a string
Argument:
detailed_event_results (dictionary): as provided by the 3rd item in the results of eval_events function
Keyword Arguments:
separator (str): characters between each item
prefix (str): string that will be added before the line
suffix (str): string that will be added to the end of the line
Returns:
str: Item order: 0. correct, 1. deletions 2. merged, 3. fragmented, 4. fragmented and merged, 5. fragmenting, 6. merging, 7. fragmenting and merging, 8. insertions, 9. total of actual events, 10. total of detected events
Examples:
>>> detailed_event_metrics_to_string(test_r)
[2, 1, 3, 1, 1, 4, 1, 1, 1, 8, 9]
>>> detailed_event_metrics_to_string(test_r, separator=";", prefix="(", suffix=")\\n")
(2;1;3;1;1;4;1;1;1;8;9)\\n
"""
return prefix + separator.join(map(str, detailed_event_metrics_to_list(detailed_event_results))) + suffix
def print_detailed_segment_results(detailed_segment_results):
"""
Print segment length for each detailed segment category. Can be used with normed values as well.
Arguments:
detailed_segment_results (dictionary): as provided by the 3rd or 4th item in the results of eval_segments function
Example:
>>> print_detailed_segment_results(test_r)
Detailed segment results (length or frame count):
true positive segments:\t\t40
true negative segments:\t\t91
insertion segments:\t\t\t10
deletion segments:\t\t\t10
fragmenting segments:\t\t7
merge segments:\t\t\t15
start overfill segments:\t\t10
end overfill segments:\t\t28
start underfill segments:\t\t13
end underfill segments:\t\t15
"""
print("Detailed segment results (length or frame count):")
print("\ttrue positive segments:\t\t" + str(detailed_segment_results["TP"]))
print("\ttrue negative segments:\t\t" + str(detailed_segment_results["TN"]))
print("\tinsertion segments:\t\t\t" + str(detailed_segment_results["I"]))
print("\tdeletion segments:\t\t\t" + str(detailed_segment_results["D"]))
print("\tfragmenting segments:\t\t" + str(detailed_segment_results["F"]))
print("\tmerge segments:\t\t\t\t" + str(detailed_segment_results["M"]))
print("\tstart overfill segments:\t" + str(detailed_segment_results["Os"]))
print("\tend overfill segments:\t\t" + str(detailed_segment_results["Oe"]))
print("\tstart underfill segments:\t" + str(detailed_segment_results["Us"]))
print("\tend underfill segments:\t\t" + str(detailed_segment_results["Ue"]))
def detailed_segment_results_to_list(detailed_segment_results):
""" Converting detailed segment results to a list (position of each item is fixed). Can be used with normed values as well.
Argument:
detailed_segment_results (dictionary): as provided by the 3rd or 4th item in the results of eval_segments function
Returns:
list: Item order: 0. true posives, 1. true negatives, 2. insertions, 3. deletions, 4. fragmenting, 5. merged, 6. start overfill, 7. end overfill, 8. start underfill, 9. end underfill
"""
return [
detailed_segment_results["TP"],
detailed_segment_results["TN"],
detailed_segment_results["I"],
detailed_segment_results["D"],
detailed_segment_results["F"],
detailed_segment_results["M"],
detailed_segment_results["Os"],
detailed_segment_results["Oe"],
detailed_segment_results["Us"],
detailed_segment_results["Ue"]
]
def detailed_segment_results_to_string(detailed_segment_results, separator=", ", prefix="[", suffix="]"):
""" Converting detailed segment results to a string. Can be used with normed values as well.
Argument:
detailed_segment_results (dictionary): as provided by the 3rd or 4th item in the results of eval_segments function
Keyword Arguments:
separator (str): characters between each item
prefix (str): string that will be added before the line
suffix (str): string that will be added to the end of the line
Returns:
str: Item order: 0. true posives, 1. true negatives, 2. insertions, 3. deletions, 4. fragmenting, 5. merged, 6. start overfill, 7. end overfill, 8. start underfill, 9. end underfill
Examples:
>>> detailed_segment_results_to_string(test_r)
[2, 1, 3, 1, 1, 4, 1, 1, 1, 8]
>>> detailed_segment_results_to_string(test_r, separator=";", prefix="(", suffix=")\\n")
(2;1;3;1;1;4;1;1;1;8)\\n
"""
return prefix + separator.join(map(str, detailed_segment_results_to_list(detailed_segment_results))) + suffix
def print_twoset_segment_metrics(twoset_metrics_results):
"""
Print 2SET metric results
Argument:
twoset_metrics_results (dictionary): as provided by the 1st item in the results of eval_events function
Example:
>>> print_twoset_segment_metrics(test_r)
2SET metrics:
true positive rate:\t\t0.471
deletion rate:\t\t0.118
fragmenting rate:\t\t0.082
start underfill rate:\t0.153
end underfill rate:\t\t0.176
1 - false positive rate:\t0.591
insertion rate:\t\t0.065
merge rate:\t\t\t0.097
start overfill rate:\t0.065
end overfill rate:\t\t0.182
"""
print("2SET metrics:")
print("\ttrue positive rate:\t\t\t" + "{0:.3f}".format(twoset_metrics_results["tpr"]))
print("\tdeletion rate:\t\t\t\t" + "{0:.3f}".format(twoset_metrics_results["dr"]))
print("\tfragmenting rate:\t\t\t" + "{0:.3f}".format(twoset_metrics_results["fr"]))
print("\tstart underfill rate:\t\t" + "{0:.3f}".format(twoset_metrics_results["us"]))
print("\tend underfill rate:\t\t\t" + "{0:.3f}".format(twoset_metrics_results["ue"]))
print("\n\t1 - false positive rate:\t" + "{0:.3f}".format(1 - twoset_metrics_results["fpr"]))
print("\tinsertion rate:\t\t\t\t" + "{0:.3f}".format(twoset_metrics_results["ir"]))
print("\tmerge rate:\t\t\t\t\t" + "{0:.3f}".format(twoset_metrics_results["mr"]))
print("\tstart overfill rate:\t\t" + "{0:.3f}".format(twoset_metrics_results["os"]))
print("\tend overfill rate:\t\t\t" + "{0:.3f}".format(twoset_metrics_results["oe"]))
def twoset_segment_metrics_to_list(twoset_metrics_results):
""" Converting detailed event metric results to a list (position of each item is fixed)
Argument:
twoset_metrics_results (dictionary): as provided by the 1st item in the results of eval_events function
Returns:
list: Item order: 0. true positive rate, 1. deletion rate 2. fragmenting rate, 3. start underfill rate, 4. end underfill rate, 5. 1 - false positive rate, 6. insertion rate, 7. merge rate, 8. start overfill rate, 9. end overfill rate
"""
return [
twoset_metrics_results["tpr"],
twoset_metrics_results["dr"],
twoset_metrics_results["fr"],
twoset_metrics_results["us"],
twoset_metrics_results["ue"],
1-twoset_metrics_results["fpr"],
twoset_metrics_results["ir"],
twoset_metrics_results["mr"],
twoset_metrics_results["os"],
twoset_metrics_results["oe"]
]
def twoset_segment_metrics_to_string(twoset_metrics_results, separator=", ", prefix="[", suffix="]"):
""" Converting detailed event metric results to a string
Argument:
twoset_metrics_results (dictionary): as provided by the 1st item in the results of eval_events function
Keyword Arguments:
separator (str): characters between each item
prefix (str): string that will be added before the line
suffix (str): string that will be added to the end of the line
Returns:
str: Item order: 0. true positive rate, 1. deletion rate 2. fragmenting rate, 3. start underfill rate, 4. end underfill rate, 5. 1 - false positive rate, 6. insertion rate, 7. merge rate, 8. start overfill rate, 9. end overfill rate
Examples:
>>> twoset_segment_metrics_to_string(test_r)
[0.47058823529411764, 0.11764705882352941, 0.08235294117647059, 0.15294117647058825, 0.17647058823529413, 0.5909090909090909, 0.06493506493506493, 0.09740259740259741, 0.06493506493506493, 0.18181818181818182]
>>> twoset_segment_metrics_to_string(test_r, separator=";", prefix="(", suffix=")\\n")
(0.47058823529411764;0.11764705882352941;0.08235294117647059;0.15294117647058825;0.17647058823529413;0.5909090909090909;0.06493506493506493;0.09740259740259741;0.06493506493506493;0.18181818181818182)\\n
"""
return prefix + separator.join(map(str, twoset_segment_metrics_to_list(twoset_metrics_results))) + suffix
| mit |
subutai/htmresearch | projects/sequence_prediction/mackey_glass/visualize_results.py | 13 | 1819 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv
import sys
from matplotlib import pyplot
import numpy
def run(filename, predictionDelay):
with open(filename, 'rU') as infile:
reader = csv.reader(infile)
reader.next()
actuals = []
shifted = []
shifted += [0] * predictionDelay
for row in reader:
actuals.append(row[1])
shifted.append(row[2])
shifted = shifted[predictionDelay:len(actuals)]
actuals = actuals[predictionDelay:]
errors = abs(numpy.array(shifted, dtype=float) - numpy.array(actuals, dtype=float)).tolist()
pyplot.subplot(2, 1, 1)
pyplot.plot(shifted)
pyplot.plot(actuals)
pyplot.subplot(2, 1, 2)
pyplot.plot(errors)
pyplot.show()
if __name__ == "__main__":
predictionDelay = int(sys.argv[2]) if len(sys.argv) > 2 else 1
run(sys.argv[1], predictionDelay)
| agpl-3.0 |
mjafin/bcbio-nextgen | bcbio/rnaseq/qc.py | 3 | 2234 | """Run Broad's RNA-SeqQC tool and handle reporting of useful summary metrics.
"""
# soft imports
try:
import pandas as pd
import statsmodels.formula.api as sm
except ImportError:
pd, sm = None, None
from bcbio import bam
import bcbio.pipeline.datadict as dd
def starts_by_depth(bam_file, data, sample_size=10000000):
"""
Return a set of x, y points where x is the number of reads sequenced and
    y is the number of unique start sites identified.
    If sample_size is smaller than the total number of reads in the file, the
    file will be downsampled.
"""
binsize = (sample_size / 100) + 1
seen_starts = set()
counted = 0
num_reads = []
starts = []
buffer = []
downsampled = bam.downsample(bam_file, data, sample_size)
with bam.open_samfile(downsampled) as samfile:
for read in samfile:
if read.is_unmapped:
continue
counted += 1
buffer.append(str(read.tid) + ":" + str(read.pos))
if counted % binsize == 0:
seen_starts.update(buffer)
buffer = []
num_reads.append(counted)
starts.append(len(seen_starts))
seen_starts.update(buffer)
num_reads.append(counted)
starts.append(len(seen_starts))
return pd.DataFrame({"reads": num_reads, "starts": starts})
def estimate_library_complexity(df, algorithm="RNA-seq"):
"""
    Estimate library complexity from the number of reads vs. the number of
    unique start sites. Returns "NA" if there are not enough data points to
    fit the line.
"""
DEFAULT_CUTOFFS = {"RNA-seq": (0.25, 0.40)}
cutoffs = DEFAULT_CUTOFFS[algorithm]
if len(df) < 5:
return {"unique_starts_per_read": 'nan',
"complexity": "NA"}
model = sm.ols(formula="starts ~ reads", data=df)
fitted = model.fit()
slope = fitted.params["reads"]
if slope <= cutoffs[0]:
complexity = "LOW"
elif slope <= cutoffs[1]:
complexity = "MEDIUM"
else:
complexity = "HIGH"
# for now don't return the complexity flag
return {"Unique Starts Per Read": float(slope)}
# return {"unique_start_per_read": float(slope),
# "complexity": complexity}
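# Minimal usage sketch (synthetic numbers, not part of the original module):
# a table in which unique start sites grow at one tenth of the read count
# yields a fitted slope of roughly 0.1 unique starts per read.
#
#   df = pd.DataFrame({"reads": [10, 20, 30, 40, 50],
#                      "starts": [1, 2, 3, 4, 5]})
#   estimate_library_complexity(df)  # -> {"Unique Starts Per Read": ~0.1}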
| mit |
davidgardenier/frbpoppy | tests/monte_carlo/goodness_of_fit.py | 1 | 11050 | from weighted_quantiles import median
from scipy.stats import ks_2samp
import numpy as np
import os
import matplotlib.pyplot as plt
from frbpoppy import unpickle, TNS, poisson_interval, pprint
from tests.rates.alpha_real import EXPECTED
from tests.convenience import plot_aa_style, rel_path
from simulations import SimulationOverview, POP_SIZE
NORM_SURV = 'parkes-htru'
class GoodnessOfFit:
def __init__(self):
self.run_pars = {1: ['alpha', 'si', 'li'],
2: ['li', 'lum_min', 'lum_max'],
3: ['w_mean', 'w_std'],
4: ['dm_igm_slope', 'dm_host']}
self.norm_surv = NORM_SURV
self.so = SimulationOverview()
self.tns = self.get_tns()
def get_tns(self):
# Only get one-offs
return TNS(repeaters=False, mute=True, update=False).df
def dm(self, pop, survey_name):
"""Calculate GoodnessOfFit for DM distributions."""
mask = ((self.tns.survey == survey_name) & (self.tns.dm <= 950))
try:
ks_dm = ks_2samp(pop.frbs.dm, self.tns[mask].dm)[1]
except ValueError:
ks_dm = np.nan
return ks_dm
def snr(self, pop, survey_name):
mask = ((self.tns.survey == survey_name) & (self.tns.dm <= 950))
try:
ks_snr = ks_2samp(pop.frbs.snr, self.tns[mask].snr)[1]
except ValueError:
ks_snr = np.nan
return ks_snr
def rate(self, pop, survey_name, norm_uuid, run, errs=False):
# Add rate details
sr = pop.source_rate
surv_sim_rate = sr.det / sr.days
# Perhaps use at some stage
if errs:
p_int = poisson_interval(sr.det, sigma=1)
surv_sim_rate_errs = [p/sr.days for p in p_int]
# Determine ratio of detection rates
if survey_name in EXPECTED:
n_frbs, n_days = EXPECTED[survey_name]
else:
n_frbs, n_days = [np.nan, np.nan]
surv_real_rate = n_frbs/n_days
# Get normalisation properties
norm_real_n_frbs, norm_real_n_days = EXPECTED[self.norm_surv]
norm_pop = unpickle(f'mc/run_{run}/{norm_uuid}')
norm_sim_n_frbs = norm_pop.source_rate.det
norm_sim_n_days = norm_pop.source_rate.days
norm_sim_rate = norm_sim_n_frbs / norm_sim_n_days
norm_real_rate = norm_real_n_frbs / norm_real_n_days
if norm_sim_rate == 0:
norm_sim_rate = POP_SIZE / norm_sim_n_days
sim_ratio = surv_sim_rate / norm_sim_rate
real_ratio = surv_real_rate / norm_real_rate
diff = np.abs(sim_ratio - real_ratio)
if diff == 0:
rate_diff = 1e-3
else:
rate_diff = 1 / diff
return rate_diff, pop.n_sources()
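    # Illustrative arithmetic for the weighting above (numbers are made up):
    # sim_ratio = 0.5 and real_ratio = 0.4 give diff = 0.1, so the returned
    # weight is 1 / 0.1 = 10; closer agreement between the simulated and
    # observed rate ratios therefore yields a larger weight.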
def calc_gofs(self, run):
# For each requested run
self.so = SimulationOverview()
par_set = self.so.df[self.so.df.run == run].par_set.iloc[0]
pprint(f'Calculating goodness of fit for run {run}, par set {par_set}')
pars = self.run_pars[par_set]
values = []
# Loop through all combination of parameters
for values, group in self.so.df[self.so.df.run == run].groupby(pars):
pprint(f' - {list(zip(pars, values))}')
# Calculate goodness of fit values for each simulation
for row_ix, row in group.iterrows():
survey_name = row.survey
uuid = row.uuid
pop = unpickle(f'mc/run_{run}/{uuid}')
# Apply a DM cutoff
mask = (pop.frbs.dm <= 950)
pop.frbs.apply(mask)
pop.source_rate.det = pop.n_sources() * pop.source_rate.f_area
dm_gof = self.dm(pop, survey_name)
snr_gof = self.snr(pop, survey_name)
self.so.df.at[row_ix, 'dm_gof'] = dm_gof
self.so.df.at[row_ix, 'snr_gof'] = snr_gof
if pop.n_sources() == 0:
self.so.df.at[row_ix, 'weight'] = 0
self.so.df.at[row_ix, 'n_det'] = pop.n_sources()
pprint(f' - No sources in {survey_name}')
continue
# Find corresponding rate normalisation population uuid
norm_mask = dict(zip(pars, values))
norm_mask['survey'] = self.norm_surv
norm_mask['run'] = run
k = norm_mask.keys()
v = norm_mask.values()
norm_uuid = group.loc[group[k].isin(v).all(axis=1), :].uuid
norm_uuid = norm_uuid.values[0]
rate_diff, n_det = self.rate(pop, survey_name, norm_uuid, run)
# Get rate weighting
self.so.df.at[row_ix, 'weight'] = rate_diff
self.so.df.at[row_ix, 'n_det'] = n_det
pprint(f'Saving the results for run {run}')
# Best matching in terms of rates
max_w = np.nanmax(self.so.df.weight)
        self.so.df.loc[self.so.df.weight == 1e3, 'weight'] = max_w
self.so.save()
def plot(self, run):
# Get data
# For each requested run
df = self.so.df
par_set = df[df.run == run].par_set.iloc[0]
# For each parameter
for main_par in self.run_pars[par_set]:
pprint(f'Plotting {main_par}')
other_pars = [e for e in self.run_pars[par_set] if e != main_par]
for compare_par in ['dm', 'snr']:
compare_col = f'{compare_par}_gof'
pprint(f' - {compare_col}')
for survey, group_surv in df[df.run == run].groupby('survey'):
pprint(f' - {survey}')
# Set up plot
plot_aa_style()
plt.rcParams["figure.figsize"] = (5.75373*3, 5.75373*3)
plt.rcParams['figure.max_open_warning'] = 125
n_x = group_surv[other_pars[0]].nunique()
if len(other_pars) > 1:
n_y = group_surv[other_pars[1]].nunique()
else:
n_y = 1
fig, ax = plt.subplots(n_x, n_y,
sharex='col', sharey='row')
groups = group_surv.groupby(other_pars)
x = -1
for i, (other_pars_vals, group) in enumerate(groups):
bins = group[main_par].values
values = group[compare_col].values
bins, values = self.add_edges_to_hist(bins, values)
if n_y > 1:
y = i % n_y
if y == 0:
x += 1
a = ax[y, x]
else:
y = i
a = ax[y]
a.step(bins, values, where='mid')
                        a.set_title(str(other_pars_vals))
diff = np.diff(bins)
if diff[1] != diff[0]:
a.set_xscale('log')
# Set axis label
if y == n_y - 1:
p = other_pars[0]
if isinstance(other_pars_vals, float):
val = other_pars_vals
else:
val = other_pars_vals[0]
p = p.replace('_', ' ')
a.set_xlabel(f'{p} = {val:.2}')
if x == 0:
p = other_pars[1]
val = other_pars_vals[1]
p = p.replace('_', ' ')
a.set_ylabel(f'{p} = {val:.2}')
# Set axis limits
subset = df[df.run == run][main_par]
y_subset = group_surv[compare_col].copy()
try:
low = np.nanmin(y_subset)
high = np.nanmax(y_subset)
except ValueError:
low = 0.0001
high = 1
log = False
if low > 0 and high > 0:
log = True
for a in ax.flatten():
a.set_xlim(subset.min(), subset.max())
if log:
a.set_yscale('log', nonposy='clip')
a.set_ylim(low, high)
p = main_par.replace('_', ' ')
fig.suptitle(f'{p} - {compare_par} - {survey}')
plt.tight_layout()
plt.subplots_adjust(top=0.95)
# Save to subdirectory
path_to_save = rel_path(f'./plots/mc/{main_par}_run{run}/')
if not os.path.isdir(path_to_save):
os.mkdir(path_to_save)
path_to_save += f'{compare_par}_{survey}.pdf'
plt.savefig(path_to_save)
plt.clf()
def add_edges_to_hist(self, bins, n, bin_type='lin'):
"""Add edges to histograms"""
np.seterr(divide='ignore', invalid='ignore')
if bin_type == 'lin':
bin_dif = np.diff(bins)[-1]
bins = np.insert(bins, 0, bins[0] - bin_dif)
bins = np.insert(bins, len(bins), bins[-1] + bin_dif)
else:
bin_dif = np.diff(np.log10(bins))[-1]
bins = np.insert(bins, 0, 10**(np.log10(bins[0])-bin_dif))
bins = np.insert(bins, len(bins), 10**(np.log10(bins[-1])+bin_dif))
n = np.insert(n, 0, np.nan)
n = np.insert(n, len(n), np.nan)
return bins, n
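    # Example of the padding above (assuming linear bins): bins [1, 2, 3] with
    # values [5, 7, 9] become [0, 1, 2, 3, 4] and [nan, 5, 7, 9, nan], so the
    # stepped curve gains an empty bin on either side.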
def weighted_median(self, df):
dm_gof = df['dm_gof'].values
dm_weight = df['n_det'].values
snr_gof = df['snr_gof'].values
snr_weight = df['n_det'].values
gofs = np.concatenate([dm_gof, snr_gof])
weights = np.concatenate([dm_weight, snr_weight])
return median(gofs, weights)
def calc_global_max(self, run):
self.so = SimulationOverview()
df = self.so.df[self.so.df.run == run]
par_set = df[df.run == run].par_set.iloc[0]
cols = self.run_pars[par_set]
values = []
gofs = []
# Loop through all combination of parameters
for value, group in df.groupby(cols):
gof = self.weighted_median(group)
values.append(value)
gofs.append(gof)
gofs = np.array(gofs)
# Find maximum (best gof)
if np.isnan(gofs).all():
return dict(zip(cols, [(np.nan, np.nan) for i in cols]))
else:
best_ix = np.nanargmax(gofs)
best_values = values[best_ix]
best_gofs = [gofs[best_ix]]*len(cols)
return dict(zip(cols, zip(best_values, best_gofs)))
| mit |
themrmax/scikit-learn | sklearn/linear_model/tests/test_huber.py | 54 | 7619 | # Authors: Manoj Kumar [email protected]
# License: BSD 3 clause
import numpy as np
from scipy import optimize, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_false
from sklearn.datasets import make_regression
from sklearn.linear_model import (
HuberRegressor, LinearRegression, SGDRegressor, Ridge)
from sklearn.linear_model.huber import _huber_loss_and_gradient
def make_regression_with_outliers(n_samples=50, n_features=20):
rng = np.random.RandomState(0)
# Generate data with outliers by replacing 10% of the samples with noise.
X, y = make_regression(
n_samples=n_samples, n_features=n_features,
random_state=0, noise=0.05)
    # Replace 10% of the samples with noise.
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
return X, y
def test_huber_equals_lr_for_high_epsilon():
    # Test that HuberRegressor matches LinearRegression for large epsilon
X, y = make_regression_with_outliers()
lr = LinearRegression(fit_intercept=True)
lr.fit(X, y)
huber = HuberRegressor(fit_intercept=True, epsilon=1e3, alpha=0.0)
huber.fit(X, y)
assert_almost_equal(huber.coef_, lr.coef_, 3)
assert_almost_equal(huber.intercept_, lr.intercept_, 2)
def test_huber_gradient():
# Test that the gradient calculated by _huber_loss_and_gradient is correct
rng = np.random.RandomState(1)
X, y = make_regression_with_outliers()
sample_weight = rng.randint(1, 3, (y.shape[0]))
loss_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[0]
grad_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[1]
# Check using optimize.check_grad that the gradients are equal.
for _ in range(5):
# Check for both fit_intercept and otherwise.
for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
w = rng.randn(n_features)
w[-1] = np.abs(w[-1])
grad_same = optimize.check_grad(
loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight)
assert_almost_equal(grad_same, 1e-6, 4)
def test_huber_sample_weights():
    # Test sample_weight implementation in HuberRegressor
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True)
huber.fit(X, y)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
# Rescale coefs before comparing with assert_array_almost_equal to make sure
# that the number of decimal places used is somewhat insensitive to the
# amplitude of the coefficients and therefore to the scale of the data
# and the regularization parameter
scale = max(np.mean(np.abs(huber.coef_)),
np.mean(np.abs(huber.intercept_)))
huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale,
huber_intercept / scale)
X, y = make_regression_with_outliers(n_samples=5, n_features=20)
X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
huber.fit(X_new, y_new)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
sample_weight = np.ones(X.shape[0])
sample_weight[1] = 3
sample_weight[3] = 2
huber.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale,
huber_intercept / scale)
# Test sparse implementation with sample weights.
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True)
huber_sparse.fit(X_csr, y, sample_weight=sample_weight)
assert_array_almost_equal(huber_sparse.coef_ / scale,
huber_coef / scale)
def test_huber_sparse():
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.1)
huber.fit(X, y)
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True, alpha=0.1)
huber_sparse.fit(X_csr, y)
assert_array_almost_equal(huber_sparse.coef_, huber.coef_)
assert_array_equal(huber.outliers_, huber_sparse.outliers_)
def test_huber_scaling_invariant():
"""Test that outliers filtering is scaling independent."""
rng = np.random.RandomState(0)
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100)
huber.fit(X, y)
n_outliers_mask_1 = huber.outliers_
assert_false(np.all(n_outliers_mask_1))
huber.fit(X, 2. * y)
n_outliers_mask_2 = huber.outliers_
assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
huber.fit(2. * X, 2. * y)
n_outliers_mask_3 = huber.outliers_
assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
def test_huber_and_sgd_same_results():
"""Test they should converge to same coefficients for same parameters"""
X, y = make_regression_with_outliers(n_samples=10, n_features=2)
# Fit once to find out the scale parameter. Scale down X and y by scale
# so that the scale parameter is optimized to 1.0
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
epsilon=1.35)
huber.fit(X, y)
X_scale = X / huber.scale_
y_scale = y / huber.scale_
huber.fit(X_scale, y_scale)
assert_almost_equal(huber.scale_, 1.0, 3)
sgdreg = SGDRegressor(
alpha=0.0, loss="huber", shuffle=True, random_state=0, n_iter=10000,
fit_intercept=False, epsilon=1.35)
sgdreg.fit(X_scale, y_scale)
assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1)
def test_huber_warm_start():
X, y = make_regression_with_outliers()
huber_warm = HuberRegressor(
fit_intercept=True, alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
huber_warm.fit(X, y)
huber_warm_coef = huber_warm.coef_.copy()
huber_warm.fit(X, y)
# SciPy performs the tol check after doing the coef updates, so
# these would be almost same but not equal.
assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
# No n_iter_ in old SciPy (<=0.9)
if huber_warm.n_iter_ is not None:
assert_equal(0, huber_warm.n_iter_)
def test_huber_better_r2_score():
    # Test that Huber scores better than Ridge on the non-outliers
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.01, max_iter=100)
huber.fit(X, y)
linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
huber_score = huber.score(X[mask], y[mask])
huber_outlier_score = huber.score(X[~mask], y[~mask])
# The Ridge regressor should be influenced by the outliers and hence
# give a worse score on the non-outliers as compared to the huber regressor.
ridge = Ridge(fit_intercept=True, alpha=0.01)
ridge.fit(X, y)
ridge_score = ridge.score(X[mask], y[mask])
ridge_outlier_score = ridge.score(X[~mask], y[~mask])
assert_greater(huber_score, ridge_score)
# The huber model should also fit poorly on the outliers.
assert_greater(ridge_outlier_score, huber_outlier_score)
| bsd-3-clause |
AIML/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression essentially minimises a penalised version
of the least-squares objective. The penalty `shrinks` the
values of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
Limags/MissionPlanner | Lib/site-packages/numpy/lib/polynomial.py | 58 | 35930 | """
Functions to operate on polynomials.
"""
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import isscalar, abs, finfo, atleast_1d, hstack
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag
from numpy.linalg import eigvals, lstsq
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
.. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
pass
else:
raise ValueError, "input must be 1d or square 2d array."
if len(seq_of_zeros) == 0:
return 1.0
a = [1]
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, [1, -seq_of_zeros[k]], mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0,roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like of shape(M,)
Rank-1 array of polynomial co-efficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError:
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with
a given sequence of roots.
polyval : Evaluate a polynomial at a point.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] Wikipedia, "Companion matrix",
http://en.wikipedia.org/wiki/Companion_matrix
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError,"Input must be a rank-1 array."
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0, :] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : {array_like, poly1d}
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : {None, list of `m` scalars, scalar}, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError, "Order of integral must be positive (see polyder)"
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError, \
"k must be a scalar or a rank-1 array of length 1 or >m."
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
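        # Divide each coefficient by its new power (n, n-1, ..., 1) and append
        # the integration constant k[0] as the lowest-order term.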
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError, "Order of derivative must be positive (see polyint)"
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than this
relative to the largest singular value will be ignored. The default
value is len(x)*eps, where eps is the relative precision of the float
type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is
False (the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is also
returned.
Returns
-------
p : ndarray, shape (M,) or (M, K)
Polynomial coefficients, highest power first.
If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond : present only if `full` = True
Residuals of the least-squares fit, the effective rank of the scaled
Vandermonde coefficient matrix, its singular values, and the specified
value of `rcond`. For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Computes polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[n] + ... + x[0] * p[1] + p[0] = y[0]
x[1]**n * p[n] + ... + x[1] * p[1] + p[0] = y[1]
...
x[k]**n * p[n] + ... + x[k] * p[1] + p[0] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0 :
raise ValueError, "expected deg >= 0"
if x.ndim != 1:
raise TypeError, "expected 1D vector for x"
if x.size == 0:
raise TypeError, "expected non-empty vector for x"
if y.ndim < 1 or y.ndim > 2 :
raise TypeError, "expected 1D or 2D array for y"
if x.shape[0] != y.shape[0] :
raise TypeError, "expected x and y to have same length"
# set rcond
if rcond is None :
rcond = len(x)*finfo(x.dtype).eps
# scale x to improve condition number
scale = abs(x).max()
if scale != 0 :
x /= scale
# solve least squares equation for powers of x
v = vander(x, order)
c, resids, rank, s = lstsq(v, y, rcond)
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
# scale returned coefficients
if scale != 0 :
if c.ndim == 1 :
c /= vander([scale], order)[0]
else :
c /= vander([scale], order).T
if full :
return c, resids, rank, s, rcond
else :
return c
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, a 1D array of numbers, or an instance of poly1d, "at"
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
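    # Horner's scheme: y = (...(p[0]*x + p[1])*x + ...)*x + p[-1]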
for i in range(len(p)):
y = x * y + p[i]
return y
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print p1
1 x + 2
>>> print p2
2
9 x + 5 x + 4
>>> print np.polyadd(p1, p2)
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print p1
2
1 x + 2 x + 3
>>> print p2
2
9 x + 5 x + 1
>>> print np.polymul(p1, p2)
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1,a2 = poly1d(a1),poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
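    # Polynomial long division: each step scales the divisor by the leading
    # remainder coefficient (times 1/v[0]) to get the next quotient term, then
    # subtracts that multiple of the divisor from the remainder.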
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
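    # Helper for poly1d.__str__: lifts the '**n' exponents onto a separate line
    # above the terms, wrapping the two-line layout at `wrap` columns.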
n = 0
line1 = ''
line2 = ''
output = ' '
while 1:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2)+len(toadd2) > wrap) or \
(len(line1)+len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print np.poly1d(p)
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print p
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError, "Polynomial must be 1d only."
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError, "Power to non-negative integers only."
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
return NX.alltrue(self.coeffs == other.coeffs)
def __ne__(self, other):
return NX.any(self.coeffs != other.coeffs)
def __setattr__(self, key, val):
raise ValueError, "Attributes cannot be changed this way."
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c','coef','coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError("'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError, "Does not support negative powers."
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
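        Examples
        --------
        >>> p = np.poly1d([1, 1, 1])
        >>> p.integ()
        poly1d([ 0.33333333,  0.5       ,  1.        ,  0.        ])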
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
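        Examples
        --------
        >>> p = np.poly1d([1, 2, 3])
        >>> p.deriv()
        poly1d([2, 2])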
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always',RankWarning)
| gpl-3.0 |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/cluster/bicluster.py | 26 | 19870 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils import check_random_state
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
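    Examples
    --------
    For a constant matrix every scaled entry is equal and each row and
    column of the result sums to one:

    >>> import numpy as np
    >>> an, row, col = _scale_normalize(np.ones((2, 2)))
    >>> np.allclose(an, 0.5), np.allclose(an.sum(axis=1), 1.0)
    (True, True)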
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
            dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
return self
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
A = safe_sparse_dot(array.T, array)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(array, array.T)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
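    Examples
    --------
    A minimal run on a small dense matrix; the exact label assignment
    depends on the k-means initialization:

    >>> import numpy as np
    >>> from sklearn.cluster import SpectralCoclustering
    >>> X = np.array([[1, 1], [2, 1], [1, 0],
    ...               [4, 7], [3, 5], [3, 6]])
    >>> model = SpectralCoclustering(n_clusters=2, random_state=0).fit(X)
    >>> model.row_labels_.shape
    (6,)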
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
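    Examples
    --------
    A minimal checkerboard example; labels depend on the random state:

    >>> import numpy as np
    >>> from sklearn.cluster import SpectralBiclustering
    >>> X = np.random.RandomState(0).rand(20, 20)
    >>> model = SpectralBiclustering(n_clusters=(2, 3), random_state=0).fit(X)
    >>> model.row_labels_.shape, model.column_labels_.shape
    ((20,), (20,))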
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
| mit |
mmottahedi/neuralnilm_prototype | scripts/e154.py | 2 | 6831 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from neuralnilm.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for LSTM
e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single LSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd LSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using LSTM not BLSTM
e151
* Max pooling
"""
def set_subsample_target(net, epoch):
net.source.subsample_target = 5
net.source.input_padding = 4
net.source.seq_length = 1500
net.generate_validation_data_and_set_shapes()
def exp_a(name):
# 151d but training for much longer and skip prob = 0.7
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5, 5, 5, 5, 5],
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1504,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=25,
include_diff=True
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=1000,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=.1, clip_range=(-1, 1)),
layers_config=[
{
'type': LSTMLayer,
'num_units': 60,
'W_in_to_cell': Uniform(25),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
],
layer_changes={
5001: {
'remove_from': -3,
'callback': set_subsample_target,
'new_layers':
[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 80,
'filter_length': 5,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(1)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': FeaturePoolLayer,
'ds': 5, # number of feature maps to be pooled together
'axis': 1 # pool over the time axis
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
},
10001: {
'remove_from': -3,
'new_layers':
[
{
'type': LSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(1),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
}
}
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
print("EXCEPTION:", exception)
import ipdb; ipdb.set_trace()
if __name__ == "__main__":
main()
| mit |
liujuan118/Antiphishing | html_similarity.py | 1 | 1940 | import networkx as nx
from collections import deque
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
def html_to_dom_tree(root):
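    """Walk a parsed page breadth-first and return an undirected networkx
    graph: each queued element whose tag is 'body' is linked to its direct
    child tags, and those children are queued in turn."""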
labels = {}
node_id = 1
q = deque()
graph = nx.Graph()
q.appendleft({'element': root, "root_id": node_id})
while len(q):
node = q.pop()
if node and node['element'].name == "body":
graph.add_node(node_id, element=node['element'].name)
node_id += 1
root_id = node['root_id']
labels[root_id] = node['element'].name
for t in node['element'].contents:
if t and t.name:
graph.add_node(node_id, element=t.name)
graph.add_edge(root_id, node_id)
q.appendleft({"element": t, "root_id": node_id})
node_id += 1
return graph
test = '<html></html>'
graph = html_to_dom_tree(BeautifulSoup(open('a.html', encoding='utf-8'), 'lxml'))
# nx.draw(graph)
# plt.show()
subpath_track = {}
def generate_subpaths(path, l):
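    """Record every contiguous subpath of `path` of length >= l in the global
    subpath_track counter; e.g. [1, 2, 3] contributes (1,), (2,), (3,),
    (1, 2), (2, 3) and (1, 2, 3)."""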
if l == len(path):
if tuple(path) not in subpath_track:
subpath_track[tuple(path)] = 1
else:
subpath_track[tuple(path)] += 1
else:
index = 0
while l+index-1 < len(path):
if tuple(path[index: l+index]) not in subpath_track:
subpath_track[tuple(path[index: l+index])] = 1
else:
subpath_track[tuple(path[index: l+index])] += 1
index += 1
generate_subpaths(path, l+1)
def get_subpaths(graph, root, track, path):
track[root] = True
if graph.degree(root) == 1:
generate_subpaths(path, 1)
else:
for node in graph.neighbors(root):
if node not in track:
get_subpaths(graph, node, track, path + [node, ])
def kernel_subpath(t1, t2, common_track):
    # Subpath kernel: sum over the observed subpaths of the product of their counts in the two trees.
    kernel_v = 0
    for p in subpath_track:
        kernel_v += common_track[t1].get(p, 0) * common_track[t2].get(p, 0)
    return kernel_v

# Populate subpath_track before computing the kernel; otherwise it is empty and the kernel is trivially 0.
if len(graph):
    root_node = min(graph.nodes())  # node ids are assigned in BFS order, so the smallest id is the root
    get_subpaths(graph, root_node, {}, [root_node])
common_track = {'t1': dict(subpath_track), 't2': dict(subpath_track)}
kernel_v = kernel_subpath('t1', 't2', common_track)
print(kernel_v)
| gpl-3.0 |
davidgbe/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
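    """Fit `estimator` on `data` and report the training wall-clock time and
    the mean absolute reconstruction error of a transform/inverse_transform
    round trip."""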
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
flightgong/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
vijaysbhat/incubator-airflow | setup.py | 5 | 9813 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
import imp
import logging
import os
import pip
import sys
logger = logging.getLogger(__name__)
# Kept manually in sync with airflow.__version__
version = imp.load_source(
'airflow.version', os.path.join('airflow', 'version.py')).version
class Tox(TestCommand):
user_options = [('tox-args=', None, "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = ''
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import tox
errno = tox.cmdline(args=self.tox_args.split())
sys.exit(errno)
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
def git_version(version):
"""
Return a version to identify the state of the underlying git repo. The version will
indicate whether the head of the current git-backed working directory is tied to a
release tag or not : it will indicate the former with a 'release:{version}' prefix
and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted changes
are present.
"""
repo = None
try:
import git
repo = git.Repo('.git')
except ImportError:
logger.warning('gitpython not found: Cannot compute the git version.')
return ''
except Exception as e:
logger.warning('Git repo not found: Cannot compute the git version.')
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '.dev0+{sha}.dirty'.format(sha=sha)
# commit is clean
# is it release of `version` ?
try:
tag = repo.git.describe(
match='[0-9]*', exact_match=True,
tags=True, dirty=True)
assert tag == version, (tag, version)
return '.release:{version}+{sha}'.format(version=version,
sha=sha)
except git.GitCommandError:
return '.dev0+{sha}'.format(sha=sha)
else:
return 'no_git_version'
def write_version(filename=os.path.join(*['airflow',
'git_version'])):
text = "{}".format(git_version(version))
with open(filename, 'w') as a:
a.write(text)
def check_previous():
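    """Abort installation if a pre-Apache 'airflow' distribution is still
    installed, since it would conflict with the 'apache-airflow' package."""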
installed_packages = ([package.project_name for package
in pip.get_installed_distributions()])
if 'airflow' in installed_packages:
print("An earlier non-apache version of Airflow was installed, "
"please uninstall it first. Then reinstall.")
sys.exit(1)
async = [
'greenlet>=0.4.9',
'eventlet>= 0.9.7',
'gevent>=0.13'
]
azure = ['azure-storage>=0.34.0']
celery = [
'celery>=3.1.17',
'flower>=0.7.3'
]
cgroups = [
'cgroupspy>=0.1.4',
]
crypto = ['cryptography>=0.9.3']
dask = [
'distributed>=1.15.2, <2'
]
databricks = ['requests>=2.5.1, <3']
datadog = ['datadog>=0.14.0']
doc = [
'sphinx>=1.2.3',
'sphinx-argparse>=0.1.13',
'sphinx-rtd-theme>=0.1.6',
'Sphinx-PyPI-upload>=0.2.1'
]
docker = ['docker-py>=1.6.0']
druid = ['pydruid>=0.2.1']
emr = ['boto3>=1.0.0']
gcp_api = [
'httplib2',
'google-api-python-client>=1.5.0, <1.6.0',
'oauth2client>=2.0.2, <2.1.0',
'PyOpenSSL',
'pandas-gbq'
]
hdfs = ['snakebite>=2.7.8']
webhdfs = ['hdfs[dataframe,avro,kerberos]>=2.0.4']
jira = ['JIRA>1.0.7']
hive = [
'hive-thrift-py>=0.0.1',
'pyhive>=0.1.3',
'impyla>=0.13.3',
'unicodecsv>=0.14.1'
]
jdbc = ['jaydebeapi>=0.2.0']
mssql = ['pymssql>=2.1.1', 'unicodecsv>=0.14.1']
mysql = ['mysqlclient>=1.3.6']
rabbitmq = ['librabbitmq>=1.6.1']
oracle = ['cx_Oracle>=5.1.2']
postgres = ['psycopg2>=2.7.1']
salesforce = ['simple-salesforce>=0.72']
s3 = [
'boto>=2.36.0',
'filechunkio>=1.6',
]
samba = ['pysmbclient>=0.1.3']
slack = ['slackclient>=1.0.0']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
ldap = ['ldap3>=0.9.9.1']
kerberos = ['pykerberos>=1.1.13',
'requests_kerberos>=0.10.0',
'thrift_sasl>=0.2.0',
'snakebite[kerberos]>=2.7.8',
'kerberos>=1.2.5']
password = [
'bcrypt>=2.0.0',
'flask-bcrypt>=0.7.1',
]
github_enterprise = ['Flask-OAuthlib>=0.9.1']
qds = ['qds-sdk>=1.9.0']
cloudant = ['cloudant>=0.5.9,<2.0'] # major update coming soon, clamp to 0.x
redis = ['redis>=2.10.5']
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica + cloudant
devel = [
'click',
'freezegun',
'jira',
'lxml>=3.3.4',
'mock',
'moto',
'nose',
'nose-ignore-docstring==0.2',
'nose-timer',
'parameterized',
'rednose'
]
devel_minreq = devel + mysql + doc + password + s3 + cgroups
devel_hadoop = devel_minreq + hive + hdfs + webhdfs + kerberos
devel_all = devel + all_dbs + doc + samba + s3 + slack + crypto + oracle + docker
def do_setup():
check_previous()
write_version()
setup(
name='apache-airflow',
description='Programmatically author, schedule and monitor data pipelines',
license='Apache License 2.0',
version=version,
packages=find_packages(),
package_data={'': ['airflow/alembic.ini', "airflow/git_version"]},
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'alembic>=0.8.3, <0.9',
'bleach==2.0.0',
'configparser>=3.5.0, <3.6.0',
'croniter>=0.3.8, <0.4',
'dill>=0.2.2, <0.3',
'flask>=0.11, <0.12',
'flask-admin==1.4.1',
'flask-cache>=0.13.1, <0.14',
'flask-login==0.2.11',
'flask-swagger==0.2.13',
'flask-wtf==0.14',
'funcsigs==1.0.0',
'future>=0.16.0, <0.17',
'gitpython>=2.0.2',
'gunicorn>=19.3.0, <19.4.0', # 19.4.? seemed to have issues
'jinja2>=2.7.3, <2.9.0',
'lxml>=3.6.0, <4.0',
'markdown>=2.5.2, <3.0',
'pandas>=0.17.1, <1.0.0',
'psutil>=4.2.0, <5.0.0',
'pygments>=2.0.1, <3.0',
'python-daemon>=2.1.1, <2.2',
'python-dateutil>=2.3, <3',
'python-nvd3==0.14.2',
'requests>=2.5.1, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy>=0.9.8',
'tabulate>=0.7.5, <0.8.0',
'thrift>=0.9.2, <0.10',
'zope.deprecation>=4.0, <5.0',
],
extras_require={
'all': devel_all,
'all_dbs': all_dbs,
'async': async,
'azure': azure,
'celery': celery,
'cgroups': cgroups,
'cloudant': cloudant,
'crypto': crypto,
'dask': dask,
'databricks': databricks,
'datadog': datadog,
'devel': devel_minreq,
'devel_hadoop': devel_hadoop,
'doc': doc,
'docker': docker,
'druid': druid,
'emr': emr,
'gcp_api': gcp_api,
'github_enterprise': github_enterprise,
'hdfs': hdfs,
'hive': hive,
'jdbc': jdbc,
'kerberos': kerberos,
'ldap': ldap,
'mssql': mssql,
'mysql': mysql,
'oracle': oracle,
'password': password,
'postgres': postgres,
'qds': qds,
'rabbitmq': rabbitmq,
's3': s3,
'salesforce': salesforce,
'samba': samba,
'slack': slack,
'statsd': statsd,
'vertica': vertica,
'webhdfs': webhdfs,
'jira': jira,
'redis': redis,
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: System :: Monitoring',
],
author='Apache Software Foundation',
author_email='[email protected]',
url='http://airflow.incubator.apache.org/',
download_url=(
'https://dist.apache.org/repos/dist/release/incubator/airflow/' + version),
cmdclass={
'test': Tox,
'extra_clean': CleanCommand,
},
)
if __name__ == "__main__":
do_setup()
| apache-2.0 |
nesterione/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initializations strategies to make
the algorithm convergence robust as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrate one single run of the ``MiniBatchKMeans``
estimator using a ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of run (with randomly generated dataset) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
sgenoud/scikit-learn | examples/cluster/plot_cluster_iris.py | 1 | 2556 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the amount of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print __doc__
# Code source: Gaël Varoquaux
# Modified for Documentation merge by Jaques Grobler
# License: BSD
import numpy as np
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1, init='random'),
}
fignum = 1
for name, est in estimators.iteritems():
fig = pl.figure(fignum, figsize=(4, 3))
pl.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
pl.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = pl.figure(fignum, figsize=(4, 3))
pl.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
pl.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'),
)
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
pl.show()
| bsd-3-clause |
bsipocz/pyspeckit | pyspeckit/spectrum/models/gaussfitter.py | 2 | 10224 | """
===============
Gaussian fitter
===============
.. moduleauthor:: Adam Ginsburg <[email protected]>
Created 3/17/08
Original version available at http://code.google.com/p/agpy/source/browse/trunk/agpy/gaussfitter.py
(the version below uses a Class instead of independent functions)
"""
import numpy
from numpy.ma import median
from numpy import pi
from pyspeckit.mpfit import mpfit
import matplotlib.cbook as mpcb
from . import mpfit_messages
from . import model
class gaussian_fitter(model.SpectralModel):
"""
A rather complicated Gaussian fitter class. Inherits from, but overrides
most components of, :mod:`model.SpectralModel`
"""
def __init__(self):
self.npars = 3
self.npeaks = 1
self.onepeakgaussfit = self._fourparfitter(self.onepeakgaussian)
def __call__(self,*args,**kwargs):
return self.multigaussfit(*args,**kwargs)
def onepeakgaussian(self, x,H,A,dx,w):
"""
Returns a 1-dimensional gaussian of form
H+A*numpy.exp(-(x-dx)**2/(2*w**2))
[height,amplitude,center,width]
"""
x = numpy.array(x) # make sure xarr is no longer a spectroscopic axis
return H+A*numpy.exp(-(x-dx)**2/(2*w**2))
def multipeakgaussian(self, x, pars):
"""
Returns flux at position x due to contributions from multiple Gaussians.
"""
x = numpy.array(x) # make sure xarr is no longer a spectroscopic axis
pars = numpy.reshape(pars, (len(pars) / 3, 3))
result = 0
for fit in pars: result += self.onepeakgaussian(x, 0, fit[0], fit[1], fit[2])
return result
def slope(self, x):
"""
Return slope at position x for multicomponent Gaussian fit. Need this in measurements class for
finding the FWHM of multicomponent lines whose centroids are not identical.
"""
pars = numpy.reshape(self.mpp, (len(self.mpp) / 3, 3))
result = 0
for fit in pars:
result += self.onepeakgaussian(x, 0, fit[0], fit[1], fit[2]) * (-2. * (x - fit[1]) / 2. / fit[2]**2)
return result
def n_gaussian(self, pars=None,a=None,dx=None,sigma=None):
"""
Returns a function that sums over N gaussians, where N is the length of
a,dx,sigma *OR* N = len(pars) / 3
The background "height" is assumed to be zero (you must "baseline" your
spectrum before fitting)
pars - a list with len(pars) = 3n, assuming a,dx,sigma repeated
dx - offset (velocity center) values
sigma - line widths
a - amplitudes
"""
if len(pars) % 3 == 0:
a = [pars[ii] for ii in xrange(0,len(pars),3)]
dx = [pars[ii] for ii in xrange(1,len(pars),3)]
sigma = [pars[ii] for ii in xrange(2,len(pars),3)]
elif not(len(dx) == len(sigma) == len(a)):
raise ValueError("Wrong array lengths! dx: %i sigma: %i a: %i" % (len(dx),len(sigma),len(a)))
def g(x):
v = numpy.zeros(len(x))
for ii in range(len(pars)/3):
v += a[ii] * numpy.exp( - ( x - dx[ii] )**2 / (2.0*sigma[ii]**2) )
return v
return g
def multigaussfit(self, xax, data, npeaks=1, err=None, params=[1,0,1],
fixed=[False,False,False], limitedmin=[False,False,True],
limitedmax=[False,False,False], minpars=[0,0,0], maxpars=[0,0,0],
quiet=True, shh=True, veryverbose=False, negamp=None,
tied = ['', '', ''], parinfo=None, debug=False, **kwargs):
"""
An improvement on onepeakgaussfit. Lets you fit multiple gaussians.
Inputs:
xax - x axis
data - y axis
npeaks - How many gaussians to fit? Default 1 (this could supersede onepeakgaussfit)
err - error corresponding to data
These parameters need to have length = 3*npeaks. If npeaks > 1 and length = 3, they will
be replicated npeaks times, otherwise they will be reset to defaults:
params - Fit parameters: [amplitude, offset, width] * npeaks
If len(params) % 3 == 0, npeaks will be set to len(params) / 3
fixed - Is parameter fixed?
limitedmin/minpars - set lower limits on each parameter (default: width>0)
limitedmax/maxpars - set upper limits on each parameter
tied - link parameters together
quiet - should MPFIT output each iteration?
shh - output final parameters?
kwargs are passed to mpfit
Returns:
Fit parameters
Model
Fit errors
chi2
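        Example (illustrative sketch only; the recovered parameters depend
        on the data and the initial guesses):
            import numpy as np
            x = np.linspace(-5, 5, 200)
            y = 3*np.exp(-(x-1)**2/2.) + 2*np.exp(-(x+2)**2/(2*0.5**2))
            fitter = gaussian_fitter()
            pars, model, errs, chi2 = fitter.multigaussfit(
                x, y, npeaks=2, params=[3, 1, 1, 2, -2, 0.5])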
"""
if len(params) != npeaks and (len(params) / 3) > npeaks:
self.npeaks = len(params) / 3
else:
self.npeaks = npeaks
if isinstance(params,numpy.ndarray): params=params.tolist()
# make sure all various things are the right length; if they're not, fix them using the defaults
        # multiformaldehydefit should process negamp directly
        # if kwargs.has_key('negamp') is False: kwargs['negamp'] = None
pardict = {"params":params,"fixed":fixed,"limitedmin":limitedmin,"limitedmax":limitedmax,"minpars":minpars,"maxpars":maxpars,"tied":tied}
for parlistname in pardict:
parlist = pardict[parlistname]
if len(parlist) != 3*self.npeaks:
# if you leave the defaults, or enter something that can be multiplied by 3 to get to the
# right number of formaldehydeians, it will just replicate
if veryverbose: print "Correcting length of parameter %s" % parlistname
if len(parlist) == 3:
parlist *= self.npeaks
elif parlistname=="params":
parlist[:] = [1,0,1] * self.npeaks
elif parlistname=="fixed":
parlist[:] = [False,False,False] * self.npeaks
elif parlistname=="limitedmax":
if negamp is None: parlist[:] = [False,False,False] * self.npeaks
elif negamp is False: parlist[:] = [False,False,False] * self.npeaks
else: parlist[:] = [True,False,False] * self.npeaks
elif parlistname=="limitedmin":
if negamp is None: parlist[:] = [False,False,True] * self.npeaks # Lines can't have negative width!
elif negamp is False: parlist[:] = [True,False,True] * self.npeaks
else: parlist[:] = [False,False,True] * self.npeaks
elif parlistname=="minpars" or parlistname=="maxpars":
parlist[:] = [0,0,0] * self.npeaks
elif parlistname=="tied":
parlist[:] = ['','',''] * self.npeaks
# mpfit doesn't recognize negamp, so get rid of it now that we're done setting limitedmin/max and min/maxpars
#if kwargs.has_key('negamp'): kwargs.pop('negamp')
def mpfitfun(x,y,err):
if err is None:
def f(p,fjac=None): return [0,(y-self.n_gaussian(pars=p)(x))]
else:
def f(p,fjac=None): return [0,(y-self.n_gaussian(pars=p)(x))/err]
return f
if xax is None:
xax = numpy.arange(len(data))
parnames = {0:"AMPLITUDE",1:"SHIFT",2:"WIDTH"}
if parinfo is None:
parinfo = [ {'n':ii, 'value':params[ii],
'limits':[minpars[ii],maxpars[ii]],
'limited':[limitedmin[ii],limitedmax[ii]], 'fixed':fixed[ii],
'parname':parnames[ii%3]+str(ii/3), 'error':ii, 'tied':tied[ii]}
for ii in xrange(len(params)) ]
if veryverbose:
print "GUESSES: "
print "\n".join(["%s: %s" % (p['parname'],p['value']) for p in parinfo])
if debug:
for p in parinfo: print p
mp = mpfit(mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet,**kwargs)
mpp = mp.params
if mp.perror is not None: mpperr = mp.perror
else: mpperr = mpp*0
chi2 = mp.fnorm
if mp.status == 0:
raise Exception(mp.errmsg)
if not shh:
print "Fit status: ",mp.status
print "Fit error message: ",mp.errmsg
print "Fit message: ",mpfit_messages[mp.status]
print "Final fit values: "
for i,p in enumerate(mpp):
parinfo[i]['value'] = p
print parinfo[i]['parname'],p," +/- ",mpperr[i]
print "Chi2: ",mp.fnorm," Reduced Chi2: ",mp.fnorm/len(data)," DOF:",len(data)-len(mpp)
self.mp = mp
self.mpp = mpp
self.mpperr = mpperr
self.model = self.n_gaussian(pars=mpp)(xax)
return mpp,self.n_gaussian(pars=mpp)(xax),mpperr,chi2
def annotations(self):
label_list = [(
"$A(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[0+jj*self.npars],self.mpperr[0+jj*self.npars]),
"$x(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[1+jj*self.npars],self.mpperr[1+jj*self.npars]),
"$\\sigma(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[2+jj*self.npars],self.mpperr[2+jj*self.npars])
) for jj in range(self.npeaks)]
labels = tuple(mpcb.flatten(label_list))
return labels
def components(self,xarr,modelpars):
modelcomponents = [ self.onepeakgaussian(xarr,
0.0,modelpars[3*i],modelpars[3*i+1],modelpars[3*i+2]) for i in range(self.npeaks)]
return modelcomponents
def integral(self, modelpars):
"""
Return the integral of the individual components (ignoring height)
"""
return self.model.sum()
# this is the "proper" way to do it, but the above line was used for compatibility with other models
integ = 0
if len(modelpars) % 3 == 0:
for amp,cen,width in numpy.reshape(modelpars,[len(modelpars)/3,3]):
integ += amp*width*numpy.sqrt(2.0*numpy.pi)
return integ
n_modelfunc = n_gaussian
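# Illustrative usage sketch (not part of the original source). The enclosing
# fitter class and the exact name of the fit method defined above are not shown
# in this excerpt, so ``fitter`` and ``multigauss_fit`` below are placeholders;
# only the (params, model, errors, chi2) return order follows the docstring above.
#   pars, model, errs, chi2 = fitter.multigauss_fit(xax, data, npeaks=2,
#                                                   params=[1, 0, 1, 0.5, 3, 1])
#   residual = data - model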
| mit |
pratapvardhan/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 25 | 11187 | # Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
# Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask,
dtype=np.float64)
assert_true(A.dtype == np.float64)
def test_connect_regions():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
for thr in (50, 150):
mask = face > thr
graph = img_to_graph(face, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
mask = face > 50
graph = grid_to_graph(*face.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = face > 150
graph = grid_to_graph(*face.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_face():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
face = face.astype(np.float32)
face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
+ face[1::2, 1::2])
face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
+ face[1::2, 1::2])
face = face.astype(np.float32)
face /= 16.0
return face
def _orange_face(face=None):
face = _downsampled_face() if face is None else face
face_color = np.zeros(face.shape + (3,))
face_color[:, :, 0] = 256 - face
face_color[:, :, 1] = 256 - face / 2
face_color[:, :, 2] = 256 - face / 4
return face_color
def _make_images(face=None):
face = _downsampled_face() if face is None else face
# make a collection of faces
images = np.zeros((3,) + face.shape)
images[0] = face
images[1] = face + 1
images[2] = face + 2
return images
downsampled_face = _downsampled_face()
orange_face = _orange_face(downsampled_face)
face_collection = _make_images(downsampled_face)
def test_extract_patches_all():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
face = orange_face
i_h, i_w = face.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
face = downsampled_face
face = face[:, 32:97]
i_h, i_w = face.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
face = downsampled_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_reconstruct_patches_perfect_color():
face = orange_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_patch_extractor_fit():
faces = face_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(faces))
def test_patch_extractor_max_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(faces) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(faces) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
faces = face_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(faces)
assert_equal(patches.shape, (len(faces) * 100, 19, 25))
def test_patch_extractor_all_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
faces = _make_images(orange_face)
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
face = downsampled_face
i_h, i_w = face.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(face, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
| bsd-3-clause |
iohannez/gnuradio | gr-filter/examples/fir_filter_fff.py | 7 | 4018 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_arg import eng_float, intx
from argparse import ArgumentParser
import sys
import numpy
try:
from matplotlib import pyplot
except ImportError:
print("Error: could not from matplotlib import pyplot (http://matplotlib.sourceforge.net/)")
sys.exit(1)
class example_fir_filter_fff(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print("Num. Taps: ", len(taps))
self.src = analog.noise_source_f(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_float, self._nsamps)
self.filt0 = filter.fir_filter_fff(self._decim, taps)
self.vsnk_src = blocks.vector_sink_f()
self.vsnk_out = blocks.vector_sink_f()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = ArgumentParser(conflict_handler="resolve")
parser.add_argument("-N", "--nsamples", type=int, default=10000,
help="Number of samples to process [default=%(default)r]")
parser.add_argument("-s", "--samplerate", type=eng_float, default=8000,
help="System sample rate [default=%(default)r]")
parser.add_argument("-B", "--bandwidth", type=eng_float, default=1000,
help="Filter bandwidth [default=%(default)r]")
parser.add_argument("-T", "--transition", type=eng_float, default=100,
help="Transition band [default=%(default)r]")
parser.add_argument("-A", "--attenuation", type=eng_float, default=80,
help="Stopband attenuation [default=%(default)r]")
parser.add_argument("-D", "--decimation", type=int, default=1,
help="Decmation factor [default=%(default)r]")
args = parser.parse_args()
put = example_fir_filter_fff(args.nsamples,
args.samplerate,
args.bandwidth,
args.transition,
args.attenuation,
args.decimation)
put.run()
data_src = numpy.array(put.vsnk_src.data())
data_snk = numpy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pyplot.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft / 4,
Fs=args.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft / 4,
Fs=args.samplerate)
f2 = pyplot.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pyplot.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
mblondel/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 39 | 4706 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# test that the feature score of the best features
F, _ = f_regression(X, y)
def test_lasso_stability_path():
"""Check lasso stability path"""
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
"""Check randomized lasso"""
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
"""Check randomized sparse logistic regression"""
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
"""Check randomized sparse logistic regression on sparse data"""
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
laosiaudi/Wiki-search | src/breadth.py | 1 | 2755 | #!/usr/bin/python
#-*-coding:utf-8-*-
from sets import Set
import Queue
import time
from Class import *
import networkx as nx
import matplotlib.pyplot as plt
global links
global keyset
class breadthSearch():
def __init__(self,Begin,Destination,data,outlink,inlink):
global links
global keyset
links = {}
keyset = set()
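        # Each line of ``outlink`` is expected to look like
        # "<title> <linked_title_1> <linked_title_2> ...": a page title
        # followed by the titles it links to, separated by single spaces.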
for line in outlink:
component = line.split(' ')
keyword = component[0]
lists = []
for item in component:
if item[-1] == '\n':
item = item[:-1]
if item == component[0]:
continue
else:
lists.append(item)
keyset.add(keyword)
links[keyword] = lists
self.begin = Begin
self.dest = Destination
def Search(self):
global links
global keyset
if not self.begin in keyset or not self.dest in keyset:
return ("",0,0,False,nx.Graph())
else:
WordSet = Set()
List = []
q = []
start = time.clock()
BeginTerm = term(self.begin,self.begin)
WordSet.add(self.begin)
List.append(BeginTerm)
Flag = False
cou = 0
            while List:
cou = cou + 1
if not List:
break
terms = List.pop(0)
q.append(terms)
p = terms.title
if p not in keyset:
continue
Termlinks = links[p]
if p == self.dest:
Flag = True
etime = time.clock() - start
                    return (terms.path,cou,etime,Flag,self.drawgraph(terms.path,q))
break
if self.dest in Termlinks:
result_path = terms.path + '->' + self.dest
Flag = True
etime = time.clock() - start
return (result_path,cou,etime,Flag,self.drawgraph(result_path,q))
break
for link in Termlinks:
if link not in WordSet:
Path = terms.path + '->' + link
NewTerm = term(link,Path)
List.append(NewTerm)
WordSet.add(link)
if Flag == False:
result_path = ""
cou = 0
return (result_path,cou,0,Flag,nx.Graph())
def drawgraph(self,path,nodelist):
color = {}
graph = nx.Graph()
for item in nodelist:
graph.add_node(item.title)
if item.title in path.split('->'):
color[item.title] = 'green'
for item in nodelist:
if item.path == '':
continue
s = item.path.split('->')
for i in range(0,len(s) - 1):
if i == 0:
continue
graph.add_edge(s[i],s[i+1])
values = [color.get(node,'red') for node in graph.nodes()]
pos = nx.spring_layout(graph)
if len(nodelist) > 500:
            nx.draw_networkx(graph,node_size=50,node_color=values,font_size = 5)
else:
nx.draw_networkx(graph,node_size=1000,node_color=values,font_size = 10)
plt.savefig('breadthSearch.png')
plt.close()
        return graph
if __name__ == '__main__':
source = "微软"
dest = "苹果"
bsearch = breadthSearch(source,dest,[],[],[])
    path, num, etime, found, graph = bsearch.Search()
    print path, num, etime, found
| mit |
dinossimpson/pyspeckit | pyspeckit/spectrum/models/ammonia_hf.py | 2 | 3717 | """
===================================================
Ammonia inversion transition: Hyperfine-only fitter
===================================================
.. moduleauthor:: Adam Ginsburg <[email protected]>
Module API
^^^^^^^^^^
"""
import numpy as np
from pyspeckit.mpfit import mpfit
import fitter
import matplotlib.cbook as mpcb
import copy
import collections
from . import hyperfine
from . import radex_modelgrid
from . import model
from ammonia_constants import (line_names, freq_dict, aval_dict, ortho_dict,
voff_lines_dict, tau_wts_dict, line_labels)
from astropy import constants
from astropy import units as u
ckms = constants.c.to(u.km/u.s).value
relative_strength_total_degeneracy = collections.defaultdict(lambda: 1)
# sanity check:
for linename in line_names:
assert len(voff_lines_dict[linename]) == len(tau_wts_dict[linename])
# For each individual inversion line, create a Hyperfine model
nh3_vtau = {linename:
hyperfine.hyperfinemodel({lineid:lineid for lineid,name in
enumerate(voff_lines_dict[linename])},
{lineid:voff for lineid,voff in
enumerate(voff_lines_dict[linename])},
{lineid:freq_dict[linename]*(1-voff/ckms)
for lineid,voff in
enumerate(voff_lines_dict[linename])},
{lineid:tauwt for lineid,tauwt in
enumerate(tau_wts_dict[linename])},
{lineid:1 for lineid,voff in
enumerate(voff_lines_dict[linename])},
)
for linename in line_names}
def nh3_vtau_multimodel_generator(linenames):
"""
If you want to use multiple hyperfines for the same spectrum, use this
generator.
It is useful if you want N independent tau/tex values but the same velocity
and linewidth
Parameters
----------
linenames : list
A list of line names from the set ('oneone', ..., 'eighteight')
Returns
-------
model : `model.SpectralModel`
A SpectralModel class build from N different metastable inversion
hyperfine models
"""
nlines = len(linenames)
def nh3_vtau_multimodel(xarr, velocity, width, *args):
assert len(args) == nlines*2
models = [nh3_vtau[linename].hyperfine(xarr, Tex=tex, tau=tau,
xoff_v=velocity, width=width)
for linename,tex,tau in zip(linenames, args[::2], args[1::2])]
return np.array(models).sum(axis=0)
mod = model.SpectralModel(nh3_vtau_multimodel, 2+nlines*2,
parnames=['center','width'] + [x
for ln in linenames
for x in ('tex{0}'.format(ln),
'tau{0}'.format(ln))
],
parlimited=[(False,False), (True,False),] + [(True, False),]*2*nlines,
parlimits=[(0,0), ]*(2+2*nlines),
shortvarnames=["v","\\sigma",] + [x
for ln in linenames
for x in
('T_{{ex}}({0})'.format(line_labels[ln]),
'\\tau({0})'.format(line_labels[ln]))
],
fitunits='Hz')
return mod
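if __name__ == "__main__":
    # Usage sketch (added for illustration, not part of the original module):
    # build a joint hyperfine-only model for the (1,1) and (2,2) inversion
    # lines. The returned SpectralModel expects 2 + 2*nlines parameters,
    # ordered as (center, width, texoneone, tauoneone, textwotwo, tautwotwo).
    joint_model = nh3_vtau_multimodel_generator(['oneone', 'twotwo'])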
| mit |
samuxiii/prototypes | learning/stock-app/app.py | 1 | 1429 | from flask import Flask
from stock import Stock, Helper
import pandas as pd
import numpy as np
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello, man!"
@app.route("/stock")
def stock():
return predictStock(Stock(), Helper())
'''
Main functions
'''
def predictStock(stock, helper):
data = stock.gettingData()
#Store the last date. Useful for print outs
prediction_date = pd.to_datetime(data.iloc[-1].snapped_at).strftime("%Y-%m-%d")
data = stock.preprocessing(data)
data_train, data_test = stock.get_train_test(data)
data_train, data_test, scaler = stock.scale(data_train, data_test)
#preparing data
features = ['price', 'market_cap', 'total_volume']
X_train = helper.prepare_sequence(data_train[features])
#customize y_train for sequence
y_train = data_train.iloc[6:].closed_price.values
#fit the model
model = stock.build_model()
model = stock.training(model, X_train, y_train)
#Predicting
X_test = helper.prepare_sequence(data_test[features])
last_sequence = X_test[-1].reshape(1,7,3)
pred = model.predict(last_sequence).item((0,0))
#recover real value (not normalized)
pred = scaler.inverse_transform(np.array([[pred,0,0,0]]))
result = "Prediction for {}: {:.2f}".format(prediction_date, pred.item((0,0)))
print(result)
return result
'''
Main program
'''
if __name__ == '__main__':
app.run()
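# Usage sketch (illustrative, not part of the original app): with the server
# running on Flask's default local port, the endpoint can be queried with
#   import requests
#   print(requests.get('http://127.0.0.1:5000/stock').text)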
| mit |
nicococo/AdaScreen | adascreen/adascreen.py | 1 | 10271 | import numpy as np
import sklearn.linear_model as lm
from screening_rules import AbstractScreeningRule
class AdaScreen(AbstractScreeningRule):
""" Adaptive Lasso Screening with halfspace constraints. """
sphere_rule = None # screening rule that produces sphere center (o) and radius (rho)
local_hs_rules = None # list of local (=expensive) halfspace constraint returning screening rules
global_hs_rules = None # list of global halfspace constraint returning screening rules
A = None
b = None
normA = None
debug = False
def __init__(self, sphere_rule, tol=1e-9, debug=False):
#AbstractScreeningRule.__init__(self, 'AdaScreen (o){0}'.format(sphere_rule.name), tol)
AbstractScreeningRule.__init__(self, 'AdaScreen:(o){0}'.format(sphere_rule.name), tol)
self.sphere_rule = sphere_rule
self.local_hs_rules = []
self.global_hs_rules = []
self.debug = debug
def add_local_hs_rule(self, rule):
self.local_hs_rules.append(rule)
self.name = '{0}+(/){1}'.format(self.name, rule.name)
def add_global_hs_rule(self, rule):
self.global_hs_rules.append(rule)
self.name = '{0}+(/){1}'.format(self.name, rule.name)
def init(self, lmax, lmax_x, X, y, normX, normy, path):
print('AdaScreen initialize global halfspace constraints.')
(self.A, self.b, self.normA) = self.get_global_halfspaces(lmax, lmax_x, X, y, normX, normy)
def get_sphere(self, l, l0, lmax, lmax_x, beta, X, y, normX, normy, nz, intervals):
return self.sphere_rule.get_sphere(l, l0, lmax, lmax_x, beta, X, y, normX, normy, nz, intervals)
def screen(self, l, l0, lmax, lmax_x, beta, X, y, normX, normy, nz, intervals):
(DIMS, EXMS) = X.shape
(o, rho) = self.sphere_rule.get_sphere(l, l0, lmax, lmax_x, beta, X, y, normX, normy, nz, intervals)
# screening based on sphere constraint
theta = (y - X[nz,:].T.dot(beta[nz])) / l0
lhs = X.dot(o)
rhs = 1.0 - normX*rho
inds = np.where(np.abs(lhs) >= rhs-self.tol)[0]
# if there are no constraints, then don't bother
(A, b, normA) = self.get_local_halfspaces(o, l, l0, lmax, lmax_x, beta, X, y, normX, normy, nz, intervals)
# (a) only local constraints or ..
# (b) no constraints, no worries
        if b.size == 0 and self.b.size == 0:
return (inds, intervals)
# (c) only global constraints
        if b.size == 0 and self.b.size > 0:
A = self.A
b = self.b
normA = self.normA
else:
# (d) mixed constraints
            if b.size > 0 and self.b.size > 0:
A = np.concatenate((A, self.A))
b = np.append(b, self.b)
normA = np.append(normA, self.normA)
# pre-calculations
prod_jk = X[inds,:].dot(A.T)
prod_ko = A.dot(o)
# distance to origin for each hyperplane r \in K x 1
r_k = (b - prod_ko) / normA # element-wise multiplication and division
# change sign according to case
r_inds = np.where(r_k >= 0.0)[0]
r_mul = np.ones(b.size)
r_mul[r_inds] = -1.0
r_k = np.abs(r_k)
#print 'Constraints x Datapoints {0}'.format(A.shape)
cosines_alpha = prod_jk / (normX[inds].reshape(len(inds),1) * normA) # J x K
sines_alpha = np.sqrt( np.maximum(1.0-cosines_alpha**2, 0.0) ) # J X K: the inner element-wise maximum(.) is due to numerics
rhos_plus = self.screen_inner(r_k, r_mul, rho, cosines_alpha, sines_alpha)
#rhos_plus = self.screen_inner_dbg_icml(r_k, r_mul, rho, cosines_alpha, sines_alpha, prod_ko, b)
S_plus = lhs[inds] + normX[inds]*rhos_plus
rhos_minus = self.screen_inner(r_k, r_mul, rho, -cosines_alpha, sines_alpha)
#rhos_minus = self.screen_inner_dbg_icml(r_k, r_mul, rho, -cosines_alpha, sines_alpha, prod_ko, b)
S_minus = -lhs[inds] + normX[inds]*rhos_minus
S = np.max((S_plus, S_minus), axis=0)
active = np.where(S >= 1.0 - self.tol)[0]
#print inds.size-active.size
if self.debug:
#print 'AdaScreen DEBUG START'
(prodjk_dbg, cos_dbg, sin_dbg) = self.cosines_dbg(X[inds,:], A, normX[inds], normA)
(rows, cols) = np.where(np.abs(prodjk_dbg-prod_jk)>1e-6)
if rows.size>0:
print 'PROD_JK:'
print (rows, cols)
(rows, cols) = np.where(np.abs(cos_dbg-cosines_alpha)>1e-6)
if rows.size>0:
print 'COS_ALPHA:'
print (rows, cols)
(rows, cols) = np.where(np.abs(sin_dbg-sines_alpha)>1e-6)
if rows.size>0:
print 'SIN_ALPHA:'
print (rows, cols)
print normX
print normy
rhos_plus_dbg = self.screen_inner_dbg(r_k, r_mul, rho, cos_dbg, sin_dbg)
rhos_minus_dbg = self.screen_inner_dbg(r_k, r_mul, rho, -cos_dbg, sin_dbg)
#print 'AdaScreen DEBUG END'
#raw_input("Press Enter to continue...")
#rhos_min = np.min((rhos_plus, rhos_minus), axis=0)
#active = np.where(np.abs(lhs[inds])>=1.0 - normX[inds]*rhos_min - self.tol)[0]
return (inds[active], intervals)
def cosines_dbg(self, X, A, normX, normA):
prod_jk = np.zeros((X.shape[0], A.shape[0]))
for j in range(X.shape[0]):
for k in range(A.shape[0]):
for n in range(A.shape[1]):
prod_jk[j,k] += X[j,n] * A[k,n]
cos_alpha = np.zeros(prod_jk.shape)
sin_alpha = np.zeros(prod_jk.shape)
for j in range(prod_jk.shape[0]):
for k in range(prod_jk.shape[1]):
cos_alpha[j,k] = prod_jk[j,k] / (normX[j]*normA[k])
sin_alpha[j,k] = np.sqrt( np.maximum(1.0 - cos_alpha[j,k]*cos_alpha[j,k], 0.))
return (prod_jk, cos_alpha, sin_alpha)
def screen_inner(self, r, r_mul, rho, cos_alpha, sin_alpha):
rhos_prime = rho*np.ones(sin_alpha.shape) # J x K
(rows, cols) = np.where(cos_alpha-r/rho>0.0)
        if np.any(rho**2 - r[cols]**2 < 0.0):
            print 'Warning: negative argument under sqrt in screen_inner'
values = np.maximum(rho**2 - (np.sqrt(rho**2 - r[cols]**2) * cos_alpha[rows, cols] + r_mul[cols]*r[cols]*sin_alpha[rows, cols])**2, 0.0)
rhos_prime[rows, cols] = np.sqrt(values)
return np.min(rhos_prime, axis=1) # J x 1
def screen_inner_dbg(self, r, r_mul, rho, cos_alpha, sin_alpha):
(J, K) = sin_alpha.shape
rhos_prime = rho*np.ones(sin_alpha.shape) # J x K
for j in range(J):
for k in range(K):
if cos_alpha[j,k]>r[k]/rho:
#print (j,k)
#print rho**2 - (np.sqrt(rho**2 - r[k]**2) * cos_alpha[j,k] + r_mul[k]*r[k]*sin_alpha[j,k])**2
value = rho**2 - (np.sqrt(rho**2 - r[k]**2) * cos_alpha[j,k] + r_mul[k]*r[k]*sin_alpha[j,k])**2
if value<0.0:
print value
value = 0.0
rhos_prime[j,k] = np.sqrt(value)
return np.min(rhos_prime, axis=1) # J x 1
def screen_inner_dbg_icml(self, r, r_mul, rho, cos_alpha, sin_alpha, prod_ko, b):
(J, K) = sin_alpha.shape
#print prod_ko.shape
#print b.shape
#print b
rhos_prime = rho*np.ones(sin_alpha.shape) # J x K
for j in range(J):
for k in range(K):
if (cos_alpha[j,k]>r[k]/rho and b[k]-prod_ko[k]>=0):
value = rho**2 - (np.sqrt(rho**2 - r[k]**2) * cos_alpha[j,k] + r_mul[k]*r[k]*sin_alpha[j,k])**2
if value<0.0:
print value
value = 0.0
rhos_prime[j,k] = np.sqrt(value)
if (sin_alpha[j,k]<=r[k]/rho and b[k]-prod_ko[k]<0):
value = rho**2 - (np.sqrt(rho**2 - r[k]**2) * cos_alpha[j,k] + r_mul[k]*r[k]*sin_alpha[j,k])**2
if value<0.0:
print value
value = 0.0
rhos_prime[j,k] = np.sqrt(value)
if (sin_alpha[j,k]>r[k]/rho and b[k]-prod_ko[k]<0):
value = np.sqrt(rho**2 - r[k]**2) * sin_alpha[j,k] - r[k]*cos_alpha[j,k]
rhos_prime[j,k] = value
return np.min(rhos_prime, axis=1) # J x 1
def get_local_halfspaces(self, o, l, l0, lmax, lmax_x, beta, X, y, normX, normy, nz, intervals):
# gather halfspace constraints
A = None
b = np.array([])
normA = None
doInit = True
for rule in self.local_hs_rules:
#print('Getting halfspace constraints of {0}..'.format(rule))
(ak, bk, normak) = rule.get_local_halfspaces(o, l, l0, lmax, lmax_x, beta, X, y, normX, normy, nz, intervals)
if doInit and ak.size>0:
A = ak
b = bk
normA = normak
doInit = False
elif ak.size>0:
A = np.concatenate((A, ak))
b = np.append(b, bk)
normA = np.append(normA, normak)
# returns a_k, b_k and ||a_k||
# A \in R^(K x N)
# b \in R^K
# normA \in R_+^K
#print A.shape
return (A, b, normA)
def get_global_halfspaces(self, lmax, lmax_x, X, y, normX, normy):
# gather halfspace constraints
A = None
b = np.array([])
normA = None
doInit = True
for rule in self.global_hs_rules:
#print('Getting halfspace constraints of {0}..'.format(rule))
(ak, bk, normak) = rule.get_global_halfspaces(lmax, lmax_x, X, y, normX, normy)
if doInit:
A = ak
b = bk
normA = normak
doInit = False
else:
A = np.concatenate((A, ak))
b = np.append(b, bk)
normA = np.append(normA, normak)
# returns a_k, b_k and ||a_k||
# A \in R^(K x N)
# b \in R^K
# normA \in R_+^K
#print A.shape
return (A, b, normA)
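# Wiring sketch (illustrative only). ``SomeSphereRule`` and ``SomeHalfspaceRule``
# stand for concrete AbstractScreeningRule subclasses from screening_rules;
# the actual rule class names are not shown in this file.
#   screener = AdaScreen(SomeSphereRule())
#   screener.add_local_hs_rule(SomeHalfspaceRule())
#   screener.init(lmax, lmax_x, X, y, normX, normy, path)
#   inds, intervals = screener.screen(l, l0, lmax, lmax_x, beta,
#                                     X, y, normX, normy, nz, intervals)
#   # ``inds`` contains the indices of features that remain active candidates.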
| mit |
rajat1994/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
hugobowne/scikit-learn | sklearn/neighbors/base.py | 30 | 30564 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.multiclass import check_classification_targets
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..exceptions import NotFittedError
from ..exceptions import DataConversionWarning
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1):
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
else:
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates are more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
check_classification_targets(y)
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
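        # encode the labels of each output column as integer indices into self.classes_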
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
| bsd-3-clause |
Anjum48/pymc3 | pymc3/examples/GHME_2013.py | 13 | 2242 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
from pylab import *
import pandas as pd
from pymc3 import *
from pymc3.distributions.timeseries import *
# <markdowncell>
# Data
# ----
# <codecell>
data = pd.read_csv(get_data_file('pymc3.examples', 'data/pancreatitis.csv'))
countries = ['CYP', 'DNK', 'ESP', 'FIN','GBR', 'ISL']
data = data[data.area.isin(countries)]
age = data['age'] = np.array(data.age_start + data.age_end)/2
rate = data.value = data.value*1000
group, countries = pd.factorize(data.area, order=countries)
ncountries = len(countries)
# <codecell>
for i, country in enumerate(countries):
subplot(2,3,i+1)
title(country)
d = data[data.area == country]
plot(d.age, d.value, '.')
ylim(0,rate.max())
# <markdowncell>
# Model Specification
# -------------------
# <codecell>
nknots = 10
knots = np.linspace(data.age_start.min(),data.age_end.max(), nknots)
def interpolate(x0,y0, x, group):
x = np.array(x)
group = np.array(group)
idx = np.searchsorted(x0, x)
dl = np.array(x - x0[idx - 1])
dr = np.array(x0[idx] - x)
d=dl+dr
wl = dr/d
return wl*y0[idx-1, group] + (1-wl)*y0[idx, group]
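# Hierarchical model: the knot values of each country follow a Gaussian random walk
# and the rate at every observed age is linearly interpolated between the knots.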
with Model() as model:
coeff_sd = T('coeff_sd', 10, 1, 5**-2)
y = GaussianRandomWalk('y', sd=coeff_sd, shape = (nknots, ncountries))
p = interpolate(knots, y, age, group)
sd = T('sd', 10, 2, 5**-2)
vals = Normal('vals', p, sd=sd, observed = rate)
# <markdowncell>
# Model Fitting
# -------------
# <codecell>
with model:
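    # start from the MAP estimate of sd and y, then use a short run to rescale NUTS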
s = find_MAP( vars=[sd, y])
step = NUTS(scaling = s)
trace = sample(100, step, s)
s = trace[-1]
step = NUTS(scaling = s)
def run(n=3000):
if n == "short":
n = 150
with model:
trace = sample(n, step, s)
# <codecell>
for i, country in enumerate(countries):
subplot(2,3,i+1)
title(country)
d = data[data.area == country]
plot(d.age, d.value, '.')
plot(knots, trace[y][::5,:,i].T, color ='r', alpha =.01);
ylim(0,rate.max())
# <codecell>
traceplot(trace[100:], vars = [coeff_sd,sd ]);
# <codecell>
autocorrplot(trace, vars = [coeff_sd,sd ])
if __name__ == '__main__':
run()
| apache-2.0 |
cpcloud/ibis | ibis/tests/all/test_join.py | 1 | 1410 | import pandas as pd
import pytest
from pytest import param
@pytest.fixture(scope='module')
def left(batting):
return batting[batting.yearID == 2015]
@pytest.fixture(scope='module')
def right(awards_players):
return awards_players[awards_players.lgID == 'NL']
@pytest.fixture(scope='module')
def left_df(left):
return left.execute()
@pytest.fixture(scope='module')
def right_df(right):
return right.execute()
@pytest.mark.skip
@pytest.mark.parametrize(
'how',
[
'inner',
'left',
'right',
'outer',
param(
'semi',
marks=pytest.mark.xfail(
raises=NotImplementedError, reason='Semi join not implemented'
),
),
param(
'anti',
marks=pytest.mark.xfail(
raises=NotImplementedError, reason='Anti join not implemented'
),
),
],
)
def test_join_project_left_table(
backend, con, left, right, left_df, right_df, how
):
predicate = ['playerID']
expr = left.join(right, predicate, how=how)[left]
with backend.skip_unsupported():
result = expr.execute()
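    # build the expected frame with pandas and keep only the left table's columns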
joined = pd.merge(
left_df, right_df, how=how, on=predicate, suffixes=('', '_y')
)
expected = joined[list(left.columns)]
backend.assert_frame_equal(
result[expected.columns], expected, check_like=True
)
| apache-2.0 |
murali-munna/scikit-learn | benchmarks/bench_plot_omp_lars.py | 266 | 4447 | """Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
n_informative = n_features / 10
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (with Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (without Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp[i_f, i_s] = delta
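    # store the LARS / OMP timing ratios; values above 1 mean OMP was faster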
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
if __name__ == '__main__':
samples_range = np.linspace(1000, 5000, 5).astype(np.int)
features_range = np.linspace(1000, 5000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.values())
import pylab as pl
fig = pl.figure('scikit-learn OMP vs. LARS benchmark results')
for i, (label, timings) in enumerate(sorted(results.iteritems())):
        ax = fig.add_subplot(1, 2, i + 1)
vmax = max(1 - timings.min(), -1 + timings.max())
pl.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
ax.set_xticklabels([''] + map(str, samples_range))
ax.set_yticklabels([''] + map(str, features_range))
pl.xlabel('n_samples')
pl.ylabel('n_features')
pl.title(label)
pl.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = pl.axes([0.1, 0.08, 0.8, 0.06])
pl.colorbar(cax=ax, orientation='horizontal')
pl.show()
| bsd-3-clause |
jdanbrown/pydatalab | google/datalab/ml/_confusion_matrix.py | 2 | 4449 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
import numpy as np
import json
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import confusion_matrix
import google.datalab.bigquery as bq
from . import _util
class ConfusionMatrix(object):
"""Represents a confusion matrix."""
def __init__(self, cm, labels):
"""
Args:
cm: a 2-dimensional matrix with row index being target, column index being predicted,
and values being count.
labels: the labels whose order matches the row/column indexes.
"""
self._cm = cm
self._labels = labels
@staticmethod
def from_csv(input_csv, headers=None, schema_file=None):
"""Create a ConfusionMatrix from a csv file.
Args:
      input_csv: Path to a CSV file (with no header). Can be a local or GCS path.
      headers: CSV headers. If present, it must include 'target' and 'predicted'.
schema_file: Path to a JSON file containing BigQuery schema. Used if "headers" is None.
If present, it must include 'target' and 'predicted' columns.
Returns:
A ConfusionMatrix that can be plotted.
Raises:
ValueError if both headers and schema_file are None, or it does not include 'target'
or 'predicted' columns.
"""
if headers is not None:
names = headers
elif schema_file is not None:
with _util.open_local_or_gcs(schema_file, mode='r') as f:
schema = json.load(f)
names = [x['name'] for x in schema]
else:
raise ValueError('Either headers or schema_file is needed')
all_files = _util.glob_files(input_csv)
all_df = []
for file_name in all_files:
with _util.open_local_or_gcs(file_name, mode='r') as f:
all_df.append(pd.read_csv(f, names=names))
df = pd.concat(all_df, ignore_index=True)
if 'target' not in df or 'predicted' not in df:
raise ValueError('Cannot find "target" or "predicted" column')
labels = sorted(set(df['target']) | set(df['predicted']))
cm = confusion_matrix(df['target'], df['predicted'], labels=labels)
return ConfusionMatrix(cm, labels)
@staticmethod
def from_bigquery(sql):
"""Create a ConfusionMatrix from a BigQuery table or query.
Args:
sql: Can be one of:
A SQL query string.
A Bigquery table string.
A Query object defined with '%%bq query --name [query_name]'.
The query results or table must include "target", "predicted" columns.
Returns:
A ConfusionMatrix that can be plotted.
Raises:
ValueError if query results or table does not include 'target' or 'predicted' columns.
"""
if isinstance(sql, bq.Query):
sql = sql._expanded_sql()
parts = sql.split('.')
if len(parts) == 1 or len(parts) > 3 or any(' ' in x for x in parts):
sql = '(' + sql + ')' # query, not a table name
else:
sql = '`' + sql + '`' # table name
query = bq.Query(
'SELECT target, predicted, count(*) as count FROM %s group by target, predicted' % sql)
df = query.execute().result().to_dataframe()
labels = sorted(set(df['target']) | set(df['predicted']))
labels_count = len(labels)
df['target'] = [labels.index(x) for x in df['target']]
df['predicted'] = [labels.index(x) for x in df['predicted']]
cm = [[0] * labels_count for i in range(labels_count)]
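    # fill the confusion matrix from the aggregated (target, predicted, count) rows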
for index, row in df.iterrows():
cm[row['target']][row['predicted']] = row['count']
return ConfusionMatrix(cm, labels)
def plot(self):
"""Plot the confusion matrix."""
plt.imshow(self._cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Confusion matrix')
plt.colorbar()
tick_marks = np.arange(len(self._labels))
plt.xticks(tick_marks, self._labels, rotation=45)
plt.yticks(tick_marks, self._labels)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
| apache-2.0 |
stetie/postpic | examples/particleshapedemo.py | 2 | 6516 | #!/usr/bin/env python
#
# This file is part of postpic.
#
# postpic is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# postpic is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with postpic. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright Stephan Kuschel 2015-2018
#
'''
This is a demonstration file showing the differences between the various
particle shapes (deposition orders 0 to 3) used when binning particles onto a grid.
'''
def main():
import numpy as np
import postpic as pp
# postpic will use matplotlib for plotting. Changing matplotlibs backend
# to "Agg" makes it possible to save plots without a display attached.
# This is necessary to run this example within the "run-tests" script
# on travis-ci.
import matplotlib; matplotlib.use('Agg')
# choose the dummy reader. This reader will create fake data for testing.
pp.chooseCode('dummy')
    # Create a dummy reader with 300 particles. It is not initialized with a fixed
    # seed, and the fake particle data are drawn from a uniform distribution.
dr = pp.readDump(300, seed=None, randfunc=np.random.random)
# set and create directory for pictures.
savedir = '_examplepictures/'
import os
if not os.path.exists(savedir):
os.mkdir(savedir)
    # initialize the plotter object.
    # the project name will be prepended to all output file names
plotter = pp.plotting.plottercls(dr, outdir=savedir, autosave=True, project='particleshapedemo')
    # we will need a reference to the MultiSpecies class quite often
from postpic import MultiSpecies as MS
# create MultiSpecies object for every particle species that exists.
pas = [MS(dr, s) for s in dr.listSpecies()]
# --- 1D visualization of particle contributions ---
def particleshapedemo(shape):
from postpic.particles import histogramdd
import matplotlib.pyplot as plt
ptclpos = np.array([4.5, 9.75, 15.0, 20.25])
y, (edges, ) = histogramdd(ptclpos, bins=25, range=(0,25), shape=shape)
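        # convert the bin edges returned by histogramdd to bin centers for plotting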
x = np.convolve(edges, [0.5, 0.5], mode='valid')
fig = plt.figure()
fig.suptitle('ParticleShape: {:s}'.format(str(shape)))
ax = fig.add_subplot(111)
ax.plot(x,y)
ax.set_ylim((0,1))
ax.set_xticks(x, minor=True)
ax.grid(which='minor')
for ix in ptclpos:
ax.axvline(x=ix, color='y')
fig.savefig(savedir + 'particleshapedemo{:s}.png'.format(str(shape)), dpi=160)
plt.close(fig)
if True:
particleshapedemo(0)
particleshapedemo(1)
particleshapedemo(2)
particleshapedemo(3)
# --- 1D ---
if True:
pa = pas[0]
plotargs = {'ylim': (0,1600), 'log10plot': False}
# 1 particle per cell
plotter.plotField(pa.createField('x', bins=300, shape=0, title='1ppc_order0', rangex=(0,1)), **plotargs)
plotter.plotField(pa.createField('x', bins=300, shape=1, title='1ppc_order1', rangex=(0,1)), **plotargs)
plotter.plotField(pa.createField('x', bins=300, shape=2, title='1ppc_order2', rangex=(0,1)), **plotargs)
plotter.plotField(pa.createField('x', bins=300, shape=3, title='1ppc_order3', rangex=(0,1)), **plotargs)
# 3 particles per cell
plotter.plotField(pa.createField('x', bins=100, shape=0, title='3ppc_order0', rangex=(0,1)), **plotargs)
plotter.plotField(pa.createField('x', bins=100, shape=1, title='3ppc_order1', rangex=(0,1)), **plotargs)
plotter.plotField(pa.createField('x', bins=100, shape=2, title='3ppc_order2', rangex=(0,1)), **plotargs)
plotter.plotField(pa.createField('x', bins=100, shape=3, title='3ppc_order3', rangex=(0,1)), **plotargs)
# 10 particles per cell
plotter.plotField(pa.createField('x', bins=30, shape=0, title='10ppc_order0', rangex=(0,1)), **plotargs)
plotter.plotField(pa.createField('x', bins=30, shape=1, title='10ppc_order1', rangex=(0,1)), **plotargs)
plotter.plotField(pa.createField('x', bins=30, shape=2, title='10ppc_order2', rangex=(0,1)), **plotargs)
plotter.plotField(pa.createField('x', bins=30, shape=3, title='10ppc_order3', rangex=(0,1)), **plotargs)
# --- 2D ---
if True:
dr = pp.readDump(300*30, seed=None, randfunc=np.random.random)
pa = MS(dr, dr.listSpecies()[0])
plotargs = {'clim': (0,3e4), 'log10plot': False}
# 1 particle per cell
plotter.plotField(pa.createField('x', 'y', bins=(300,30), shape=0, title='1ppc_order0', rangex=(0,1), rangey=(0,1)), **plotargs)
plotter.plotField(pa.createField('x', 'y', bins=(300,30), shape=1, title='1ppc_order1', rangex=(0,1), rangey=(0,1)), **plotargs)
plotter.plotField(pa.createField('x', 'y', bins=(300,30), shape=2, title='1ppc_order2', rangex=(0,1), rangey=(0,1)), **plotargs)
plotter.plotField(pa.createField('x', 'y', bins=(300,30), shape=3, title='1ppc_order3', rangex=(0,1), rangey=(0,1)), **plotargs)
# 3 particles per cell
plotter.plotField(pa.createField('x', 'y', bins=(100,10), shape=0, title='3ppc_order0', rangex=(0,1), rangey=(0,1)), **plotargs)
plotter.plotField(pa.createField('x', 'y', bins=(100,10), shape=1, title='3ppc_order1', rangex=(0,1), rangey=(0,1)), **plotargs)
plotter.plotField(pa.createField('x', 'y', bins=(100,10), shape=2, title='3ppc_order2', rangex=(0,1), rangey=(0,1)), **plotargs)
plotter.plotField(pa.createField('x', 'y', bins=(100,10), shape=3, title='3ppc_order3', rangex=(0,1), rangey=(0,1)), **plotargs)
# --- 3D ---
if True:
dr = pp.readDump(300*30, seed=None, randfunc=np.random.random, dimensions=3)
pa = MS(dr, dr.listSpecies()[0])
# just try to create the field. not plotting routines yet
f = pa.createField('x', 'y', 'z', bins=(30,30,10), shape=2, title='1ppc_order2', rangex=(0,1), rangey=(0,1), rangez=(0,1))
f = pa.createField('x', 'y', 'z', bins=(30,30,10), shape=3, title='1ppc_order3', rangex=(0,1), rangey=(0,1), rangez=(0,1))
if __name__=='__main__':
main()
| gpl-3.0 |
Myasuka/scikit-learn | sklearn/utils/arpack.py | 265 | 64837 | """
This contains a copy of the future version of
scipy.sparse.linalg.eigen.arpack.eigsh
It's an upgraded wrapper of the ARPACK library which
allows the use of shift-invert mode for symmetric matrices.
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermitian complex/double complex interface.
# To find eigenvalues of a Hermitian matrix you
# must use eigs() and not eigsh()
# It might be desirable to handle the Hermitian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real (single and double precision) interfaces (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK and handle shifted and shift-inverse computations
# for eigenvalues by providing a shift (sigma) and a solver.
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
import warnings
from scipy.sparse.linalg.eigen.arpack import _arpack
import numpy as np
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import identity, isspmatrix, isspmatrix_csr
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
import scipy
from distutils.version import LooseVersion
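# map numpy dtype characters to the ARPACK routine prefixes (s/d: real, c/z: complex)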
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
DNAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. IPARAM(5) "
"returns the number of wanted converged Ritz values.",
2: "No longer an informational error. Deprecated starting "
"with release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the "
"Implicitly restarted Arnoldi iteration. One possibility "
"is to increase the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation;",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible.",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found.",
2: "No longer an informational error. Deprecated starting with "
"release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase "
"the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from trid. eigenvalue calculation; "
"Informational error from LAPACK routine dsteqr .",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible. ",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
0: "Normal exit.",
1: "The Schur form computed by LAPACK routine dlahqr "
"could not be reordered by LAPACK routine dtrsen. "
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV "
"columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr .",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "DNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "DNEUPD got a different count of the number of converged "
"Ritz values than DNAUPD got. This indicates the user "
"probably made an error in passing data from DNAUPD to "
"DNEUPD or that the data was modified before entering "
"DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
"could not be reordered by LAPACK routine strsen . "
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
"converged Ritz values than SNAUPD got. This indicates "
"the user probably made an error in passing data from "
"SNAUPD to SNEUPD or that the data was modified before "
"entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
1: "The Schur form computed by LAPACK routine csheqr "
"could not be reordered by LAPACK routine ztrsen. "
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
"increase the size of the array D to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 1 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation. "
"This should never happened.",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine ztrevc.",
-10: "IPARAM(7) must be 1,2,3",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "ZNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "ZNEUPD got a different count of the number of "
"converged Ritz values than ZNAUPD got. This "
"indicates the user probably made an error in passing "
"data from ZNAUPD to ZNEUPD or that the data was "
"modified before entering ZNEUPD"}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
"converged Ritz values than CNAUPD got. This indicates "
"the user probably made an error in passing data from "
"CNAUPD to CNEUPD or that the data was modified before "
"entering CNEUPD")
DSEUPD_ERRORS = {
0: "Normal exit.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: ("Error return from trid. eigenvalue calculation; "
"Information error from LAPACK routine dsteqr."),
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "NEV and WHICH = 'BE' are incompatible.",
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
-16: "HOWMNY = 'S' not yet implemented",
-17: ("DSEUPD got a different count of the number of converged "
"Ritz values than DSAUPD got. This indicates the user "
"probably made an error in passing data from DSAUPD to "
"DSEUPD or that the data was modified before entering "
"DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
"to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
"converged "
"Ritz values than SSAUPD got. This indicates the user "
"probably made an error in passing data from SSAUPD to "
"SSEUPD or that the data was modified before entering "
"SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
's': SNAUPD_ERRORS,
'z': ZNAUPD_ERRORS,
'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
's': SNEUPD_ERRORS,
'z': ZNEUPD_ERRORS,
'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
"""
ARPACK error
"""
def __init__(self, info, infodict=_NAUPD_ERRORS):
msg = infodict.get(info, "Unknown error")
RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
class ArpackNoConvergence(ArpackError):
"""
ARPACK iteration did not converge
Attributes
----------
eigenvalues : ndarray
Partial result. Converged eigenvalues.
eigenvectors : ndarray
Partial result. Converged eigenvectors.
"""
def __init__(self, msg, eigenvalues, eigenvectors):
ArpackError.__init__(self, -1, {-1: msg})
self.eigenvalues = eigenvalues
self.eigenvectors = eigenvectors
class _ArpackParams(object):
def __init__(self, n, k, tp, mode=1, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
if k <= 0:
raise ValueError("k must be positive, k=%d" % k)
if maxiter is None:
maxiter = n * 10
if maxiter <= 0:
raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
if tp not in 'fdFD':
raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
if v0 is not None:
# ARPACK overwrites its initial resid, make a copy
self.resid = np.array(v0, copy=True)
info = 1
else:
self.resid = np.zeros(n, tp)
info = 0
if sigma is None:
#sigma not used
self.sigma = 0
else:
self.sigma = sigma
if ncv is None:
ncv = 2 * k + 1
ncv = min(ncv, n)
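        # ncv is the size of the internally used Arnoldi/Lanczos basis
        # (defaults to 2*k + 1, capped at the problem size n)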
self.v = np.zeros((n, ncv), tp) # holds Ritz vectors
self.iparam = np.zeros(11, "int")
# set solver mode and parameters
ishfts = 1
self.mode = mode
self.iparam[0] = ishfts
self.iparam[2] = maxiter
self.iparam[3] = 1
self.iparam[6] = mode
self.n = n
self.tol = tol
self.k = k
self.maxiter = maxiter
self.ncv = ncv
self.which = which
self.tp = tp
self.info = info
self.converged = False
self.ido = 0
def _raise_no_convergence(self):
msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
k_ok = self.iparam[4]
num_iter = self.iparam[2]
try:
ev, vec = self.extract(True)
except ArpackError as err:
msg = "%s [%s]" % (msg, err)
ev = np.zeros((0,))
vec = np.zeros((self.n, 0))
k_ok = 0
raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x :
# A - symmetric
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the general eigenvalue problem:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
#
# mode = 4:
# Solve the general eigenvalue problem in Buckling mode:
# A*x = lambda*AG*x
# A - symmetric positive semi-definite
# AG - symmetric indefinite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = left multiplication by [A-sigma*AG]^-1
#
# mode = 5:
# Solve the general eigenvalue problem in Cayley-transformed mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 3:
if matvec is not None:
raise ValueError("matvec must not be specified for mode=3")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=3")
if M_matvec is None:
self.OP = Minv_matvec
self.OPa = Minv_matvec
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(M_matvec(x))
self.OPa = Minv_matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 4:
if matvec is None:
raise ValueError("matvec must be specified for mode=4")
if M_matvec is not None:
raise ValueError("M_matvec must not be specified for mode=4")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=4")
self.OPa = Minv_matvec
self.OP = lambda x: self.OPa(matvec(x))
self.B = matvec
self.bmat = 'G'
elif mode == 5:
if matvec is None:
raise ValueError("matvec must be specified for mode=5")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=5")
self.OPa = Minv_matvec
self.A_matvec = matvec
if M_matvec is None:
self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(matvec(x)
+ sigma * M_matvec(x))
self.B = M_matvec
self.bmat = 'G'
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _SEUPD_WHICH:
raise ValueError("which must be one of %s"
% ' '.join(_SEUPD_WHICH))
if k >= n:
raise ValueError("k must be less than rank(A), k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k:
raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp)
ltr = _type_conv[self.tp]
if ltr not in ["s", "d"]:
raise ValueError("Input matrix is not real-valued.")
self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
self.iterate_infodict = _SAUPD_ERRORS[ltr]
self.extract_infodict = _SEUPD_ERRORS[ltr]
self.ipntr = np.zeros(11, "int")
def iterate(self):
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl, self.info)
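        # ipntr holds 1-based Fortran offsets into workd that point at the
        # reverse-communication input vector x and the output vector y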
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode == 1:
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.mode == 2:
self.workd[xslice] = self.OPb(self.workd[xslice])
self.workd[yslice] = self.OPa(self.workd[xslice])
elif self.mode == 5:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
Ax = self.A_matvec(self.workd[xslice])
self.workd[yslice] = self.OPa(Ax + (self.sigma *
self.workd[Bxslice]))
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
rvec = return_eigenvectors
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
self.bmat, self.which, self.k,
self.tol, self.resid, self.v,
self.iparam[0:7], self.ipntr,
self.workd[0:2 * self.n],
self.workl, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
class _UnsymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x
# A - square matrix
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the generalized eigenvalue problem:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3,4:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
# if A is real and mode==3, use the real part of Minv_matvec
# if A is real and mode==4, use the imag part of Minv_matvec
# if A is complex and mode==3,
# use real and imag parts of Minv_matvec
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode in (3, 4):
if matvec is None:
raise ValueError("matvec must be specified "
"for mode in (3,4)")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified "
"for mode in (3,4)")
self.matvec = matvec
if tp in 'DF': # complex type
if mode == 3:
self.OPa = Minv_matvec
else:
raise ValueError("mode=4 invalid for complex A")
else: # real type
if mode == 3:
self.OPa = lambda x: np.real(Minv_matvec(x))
else:
self.OPa = lambda x: np.imag(Minv_matvec(x))
if M_matvec is None:
self.B = lambda x: x
self.bmat = 'I'
self.OP = self.OPa
else:
self.B = M_matvec
self.bmat = 'G'
self.OP = lambda x: self.OPa(M_matvec(x))
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _NEUPD_WHICH:
raise ValueError("Parameter which must be one of %s"
% ' '.join(_NEUPD_WHICH))
if k >= n - 1:
raise ValueError("k must be less than rank(A)-1, k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k + 1:
raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp)
ltr = _type_conv[self.tp]
self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
self.iterate_infodict = _NAUPD_ERRORS[ltr]
self.extract_infodict = _NEUPD_ERRORS[ltr]
self.ipntr = np.zeros(14, "int")
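        # the complex single/double routines additionally need a real-valued work array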
if self.tp in 'FD':
self.rwork = np.zeros(self.ncv, self.tp.lower())
else:
self.rwork = None
def iterate(self):
if self.tp in 'fd':
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.info)
else:
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.rwork, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode in (1, 2):
self.workd[yslice] = self.OP(self.workd[xslice])
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
k, n = self.k, self.n
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
sigmar = np.real(self.sigma)
sigmai = np.imag(self.sigma)
workev = np.zeros(3 * self.ncv, self.tp)
if self.tp in 'fd':
dr = np.zeros(k + 1, self.tp)
di = np.zeros(k + 1, self.tp)
zr = np.zeros((n, k + 1), self.tp)
dr, di, zr, ierr = \
self._arpack_extract(
return_eigenvectors, howmny, sselect, sigmar, sigmai,
workev, self.bmat, self.which, k, self.tol, self.resid,
self.v, self.iparam, self.ipntr, self.workd, self.workl,
self.info)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
nreturned = self.iparam[4] # number of good eigenvalues returned
# Build complex eigenvalues from real and imaginary parts
d = dr + 1.0j * di
# Arrange the eigenvectors: complex eigenvectors are stored as
# real,imaginary in consecutive columns
z = zr.astype(self.tp.upper())
# The ARPACK nonsymmetric real and double interface (s,d)naupd
# return eigenvalues and eigenvectors in real (float,double)
# arrays.
# Efficiency: this should check that return_eigenvectors == True
# before going through this construction.
if sigmai == 0:
i = 0
while i <= k:
# check if complex
if abs(d[i].imag) != 0:
# this is a complex conjugate pair with eigenvalues
# in consecutive columns
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
else:
# real matrix, mode 3 or 4, imag(sigma) is nonzero:
# see remark 3 in <s,d>neupd.f
# Build complex eigenvalues from real and imaginary parts
i = 0
while i <= k:
if abs(d[i].imag) == 0:
d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
else:
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
d[i] = ((np.dot(zr[:, i],
self.matvec(zr[:, i]))
+ np.dot(zr[:, i + 1],
self.matvec(zr[:, i + 1])))
+ 1j * (np.dot(zr[:, i],
self.matvec(zr[:, i + 1]))
- np.dot(zr[:, i + 1],
self.matvec(zr[:, i]))))
d[i + 1] = d[i].conj()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
# Now we have k+1 possible eigenvalues and eigenvectors
# Return the ones specified by the keyword "which"
if nreturned <= k:
# we got less or equal as many eigenvalues we wanted
d = d[:nreturned]
z = z[:, :nreturned]
else:
# we got one extra eigenvalue (likely a cc pair, but which?)
# cut at approx precision for sorting
rd = np.round(d, decimals=_ndigits[self.tp])
if self.which in ['LR', 'SR']:
ind = np.argsort(rd.real)
elif self.which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest
# abs(imaginary) why?
ind = np.argsort(abs(rd.imag))
else:
ind = np.argsort(abs(rd))
if self.which in ['LR', 'LM', 'LI']:
d = d[ind[-k:]]
z = z[:, ind[-k:]]
if self.which in ['SR', 'SM', 'SI']:
d = d[ind[:k]]
z = z[:, ind[:k]]
else:
# complex is so much simpler...
d, z, ierr =\
self._arpack_extract(
return_eigenvectors, howmny, sselect, self.sigma, workev,
self.bmat, self.which, k, self.tol, self.resid, self.v,
self.iparam, self.ipntr, self.workd, self.workl,
self.rwork, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
"""
SpLuInv:
helper class to repeatedly solve M*x=b
       using a sparse LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = splu(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
def _matvec(self, x):
# careful here: splu.solve will throw away imaginary
# part of x if M is real
if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
return (self.M_lu.solve(np.real(x))
+ 1j * self.M_lu.solve(np.imag(x)))
else:
return self.M_lu.solve(x)
class LuInv(LinearOperator):
"""
LuInv:
helper class to repeatedly solve M*x=b
using an LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = lu_factor(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
def _matvec(self, x):
return lu_solve(self.M_lu, x)
class IterInv(LinearOperator):
"""
IterInv:
helper class to repeatedly solve M*x=b
using an iterative method.
"""
def __init__(self, M, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(M.dtype).eps
self.M = M
self.ifunc = ifunc
self.tol = tol
if hasattr(M, 'dtype'):
dtype = M.dtype
else:
x = np.zeros(M.shape[1])
dtype = (M * x).dtype
LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)
def _matvec(self, x):
b, info = self.ifunc(self.M, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting M: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
class IterOpInv(LinearOperator):
"""
IterOpInv:
helper class to repeatedly solve [A-sigma*M]*x = b
using an iterative method
"""
def __init__(self, A, M, sigma, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(A.dtype).eps
self.A = A
self.M = M
self.sigma = sigma
self.ifunc = ifunc
self.tol = tol
x = np.zeros(A.shape[1])
if M is None:
dtype = self.mult_func_M_None(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func_M_None,
dtype=dtype)
else:
dtype = self.mult_func(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func,
dtype=dtype)
LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)
def mult_func(self, x):
return self.A.matvec(x) - self.sigma * self.M.matvec(x)
def mult_func_M_None(self, x):
return self.A.matvec(x) - self.sigma * x
def _matvec(self, x):
b, info = self.ifunc(self.OP, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting [A-sigma*M]: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
def get_inv_matvec(M, symmetric=False, tol=0):
if isdense(M):
return LuInv(M).matvec
elif isspmatrix(M):
if isspmatrix_csr(M) and symmetric:
M = M.T
return SpLuInv(M).matvec
else:
return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
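    # return a matvec function that applies [A - sigma*M]^-1, using a dense LU,
    # a sparse LU or an iterative solver depending on the types of A and M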
if sigma == 0:
return get_inv_matvec(A, symmetric=symmetric, tol=tol)
if M is None:
#M is the identity matrix
if isdense(A):
if (np.issubdtype(A.dtype, np.complexfloating)
or np.imag(sigma) == 0):
A = np.copy(A)
else:
A = A + 0j
A.flat[::A.shape[1] + 1] -= sigma
return LuInv(A).matvec
elif isspmatrix(A):
A = A - sigma * identity(A.shape[0])
if symmetric and isspmatrix_csr(A):
A = A.T
return SpLuInv(A.tocsc()).matvec
else:
return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,
tol=tol).matvec
else:
if ((not isdense(A) and not isspmatrix(A)) or
(not isdense(M) and not isspmatrix(M))):
return IterOpInv(_aslinearoperator_with_dtype(A),
_aslinearoperator_with_dtype(M), sigma,
tol=tol).matvec
elif isdense(A) or isdense(M):
return LuInv(A - sigma * M).matvec
else:
OP = A - sigma * M
if symmetric and isspmatrix_csr(OP):
OP = OP.T
return SpLuInv(OP.tocsc()).matvec
def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,
OPpart=None):
"""
Find k eigenvalues and eigenvectors of the square matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
for w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing \
the operation A * x, where A is a real or complex square matrix.
k : int, default 6
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
return_eigenvectors : boolean, default True
Whether to return the eigenvectors along with the eigenvalues.
M : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation M*x for the generalized eigenvalue problem
``A * x = w * M * x``
M must represent a real symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma==None, M is positive definite
* If sigma is specified, M is positive semi-definite
If sigma==None, eigs requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real or complex
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] * x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
For a real matrix A, shift-invert can either be done in imaginary
mode or real mode, specified by the parameter OPpart ('r' or 'i').
Note that when sigma is specified, the keyword 'which' (below)
refers to the shifted eigenvalues w'[i] where:
* If A is real and OPpart == 'r' (default),
w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ]
* If A is real and OPpart == 'i',
w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ]
* If A is complex,
w'[i] = 1/(w[i]-sigma)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
`ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']
Which `k` eigenvectors and eigenvalues to find:
- 'LM' : largest magnitude
- 'SM' : smallest magnitude
- 'LR' : largest real part
- 'SR' : smallest real part
- 'LI' : largest imaginary part
- 'SI' : smallest imaginary part
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion)
The default value of 0 implies machine precision.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
Minv : N x N matrix, array, sparse matrix, or linear operator
See notes in M, above.
OPinv : N x N matrix, array, sparse matrix, or linear operator
See notes in sigma, above.
OPpart : 'r' or 'i'.
See notes in sigma, above
Returns
-------
w : array
Array of k eigenvalues.
v : array
An array of `k` eigenvectors.
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigsh : eigenvalues and eigenvectors for symmetric matrix A
svds : singular value decomposition for a matrix A
Examples
--------
Find 6 eigenvectors of the identity matrix:
>>> from sklearn.utils.arpack import eigs
>>> id = np.identity(13)
>>> vals, vecs = eigs(id, k=6)
>>> vals
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> vecs.shape
(13, 6)
Notes
-----
This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
find the eigenvalues and eigenvectors [2]_.
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if OPpart is not None:
raise ValueError("OPpart should not be specified with "
"sigma = None or complex A")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
#sigma is not None: shift-invert mode
if np.issubdtype(A.dtype, np.complexfloating):
if OPpart is not None:
raise ValueError("OPpart should not be specified "
"with sigma=None or complex A")
mode = 3
elif OPpart is None or OPpart.lower() == 'r':
mode = 3
elif OPpart.lower() == 'i':
if np.imag(sigma) == 0:
raise ValueError("OPpart cannot be 'i' if sigma is real")
mode = 4
else:
raise ValueError("OPpart must be one of ('r','i')")
matvec = _aslinearoperator_with_dtype(A).matvec
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=False, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None,
OPinv=None, mode='normal'):
"""
Find k eigenvalues and eigenvectors of the real symmetric square matrix
or complex hermitian matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation A * x, where A is a real symmetric matrix
For buckling mode (see below) A must additionally be positive-definite
k : integer
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
M : An N x N matrix, array, sparse matrix, or linear operator representing
the operation M * x for the generalized eigenvalue problem
``A * x = w * M * x``.
M must represent a real, symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma == None, M is symmetric positive definite
* If sigma is specified, M is symmetric positive semi-definite
* In buckling mode, M is symmetric indefinite.
If sigma == None, eigsh requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
Note that when sigma is specified, the keyword 'which' refers to
the shifted eigenvalues w'[i] where:
- if mode == 'normal',
w'[i] = 1 / (w[i] - sigma)
- if mode == 'cayley',
w'[i] = (w[i] + sigma) / (w[i] - sigma)
- if mode == 'buckling',
w'[i] = w[i] / (w[i] - sigma)
(see further discussion in 'mode' below)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k and smaller than n;
it is recommended that ncv > 2*k
which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
If A is a complex hermitian matrix, 'BE' is invalid.
Which `k` eigenvectors and eigenvalues to find
- 'LM' : Largest (in magnitude) eigenvalues
- 'SM' : Smallest (in magnitude) eigenvalues
- 'LA' : Largest (algebraic) eigenvalues
- 'SA' : Smallest (algebraic) eigenvalues
- 'BE' : Half (k/2) from each end of the spectrum
When k is odd, return one more (k/2+1) from the high end
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion).
The default value of 0 implies machine precision.
Minv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in M, above
OPinv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in sigma, above.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
mode : string ['normal' | 'buckling' | 'cayley']
Specify strategy to use for shift-invert mode. This argument applies
only for real-valued A and sigma != None. For shift-invert mode,
ARPACK internally solves the eigenvalue problem
``OP * x'[i] = w'[i] * B * x'[i]``
and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
into the desired eigenvectors and eigenvalues of the problem
``A * x[i] = w[i] * M * x[i]``.
The modes are as follows:
- 'normal' : OP = [A - sigma * M]^-1 * M
B = M
w'[i] = 1 / (w[i] - sigma)
- 'buckling' : OP = [A - sigma * M]^-1 * A
B = A
w'[i] = w[i] / (w[i] - sigma)
- 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M]
B = M
w'[i] = (w[i] + sigma) / (w[i] - sigma)
The choice of mode will affect which eigenvalues are selected by
the keyword 'which', and can also impact the stability of
convergence (see [2] for a discussion)
Returns
-------
w : array
Array of k eigenvalues
v : array
An array of k eigenvectors
        The v[i] is the eigenvector corresponding to the eigenvalue w[i]
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
svds : singular value decomposition for a matrix A
Notes
-----
This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
functions which use the Implicitly Restarted Lanczos Method to
find the eigenvalues and eigenvectors [2]_.
Examples
--------
>>> from sklearn.utils.arpack import eigsh
>>> id = np.identity(13)
>>> vals, vecs = eigsh(id, k=6)
>>> vals # doctest: +SKIP
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> print(vecs.shape)
(13, 6)
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
# complex hermitian matrices should be solved with eigs
if np.issubdtype(A.dtype, np.complexfloating):
if mode != 'normal':
raise ValueError("mode=%s cannot be used with "
"complex matrix A" % mode)
if which == 'BE':
raise ValueError("which='BE' cannot be used with complex matrix A")
elif which == 'LA':
which = 'LR'
elif which == 'SA':
which = 'SR'
ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
ncv=ncv, maxiter=maxiter, tol=tol,
return_eigenvectors=return_eigenvectors, Minv=Minv,
OPinv=OPinv)
if return_eigenvectors:
return ret[0].real, ret[1]
else:
return ret.real
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
A = _aslinearoperator_with_dtype(A)
matvec = A.matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
# sigma is not None: shift-invert mode
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
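        # Map the user-facing mode names onto ARPACK's numeric modes
        # (3 = shift-invert, 4 = buckling, 5 = Cayley transform); each one
        # needs a solve with [A - sigma*M], supplied via OPinv or built here.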
# normal mode
if mode == 'normal':
mode = 3
matvec = None
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M = _aslinearoperator_with_dtype(M)
M_matvec = M.matvec
# buckling mode
elif mode == 'buckling':
mode = 4
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
matvec = _aslinearoperator_with_dtype(A).matvec
M_matvec = None
# cayley-transform mode
elif mode == 'cayley':
mode = 5
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
# unrecognized mode
else:
raise ValueError("unrecognized mode '%s'" % mode)
params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _svds(A, k=6, ncv=None, tol=0):
"""Compute k singular values/vectors for a sparse matrix using ARPACK.
Parameters
----------
A : sparse matrix
Array to compute the SVD on
k : int, optional
Number of singular values and vectors to compute.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k+1 and smaller than n;
it is recommended that ncv > 2*k
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
Notes
-----
This is a naive implementation using an eigensolver on A.H * A or
A * A.H, depending on which one is more efficient.
"""
if not (isinstance(A, np.ndarray) or isspmatrix(A)):
A = np.asarray(A)
n, m = A.shape
if np.issubdtype(A.dtype, np.complexfloating):
herm = lambda x: x.T.conjugate()
eigensolver = eigs
else:
herm = lambda x: x.T
eigensolver = eigsh
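    # Eigendecompose the smaller Gram matrix: for a tall A (n > m) that is
    # A.H * A (m x m), otherwise A * A.H (n x n).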
if n > m:
X = A
XH = herm(A)
else:
XH = A
X = herm(A)
if hasattr(XH, 'dot'):
def matvec_XH_X(x):
return XH.dot(X.dot(x))
else:
def matvec_XH_X(x):
return np.dot(XH, np.dot(X, x))
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,
shape=(X.shape[1], X.shape[1]))
# Ignore deprecation warnings here: dot on matrices is deprecated,
# but this code is a backport anyhow
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)
s = np.sqrt(eigvals)
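    # Singular values are the square roots of the Gram-matrix eigenvalues;
    # the remaining set of singular vectors follows from A * v = s * u
    # (or its adjoint), hence the division by s below.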
if n > m:
v = eigvec
if hasattr(X, 'dot'):
u = X.dot(v) / s
else:
u = np.dot(X, v) / s
vh = herm(v)
else:
u = eigvec
if hasattr(X, 'dot'):
vh = herm(X.dot(u) / s)
else:
vh = herm(np.dot(X, u) / s)
return u, s, vh
# check if backport is actually needed:
if scipy.version.version >= LooseVersion('0.10'):
from scipy.sparse.linalg import eigs, eigsh, svds
else:
eigs, eigsh, svds = _eigs, _eigsh, _svds
| bsd-3-clause |
strawpants/speedplot | src/speedplot.py | 1 | 7326 | #!/usr/bin/python3
# Script to quickly plot line curves from tabulated data
# Author Roelof Rietbroek (http://wobbly.earth)
# initial version 7 March 2017
# License: see file LICENSE (MIT)
import sys
from optparse import OptionParser
import numpy as np
import matplotlib.pyplot as plt
from cycler import cycler
import scipy.io as sio
from ast import literal_eval as make_tuple
#main plotting function
def main(argv):
#set up command line options
usage=argv[0]+ " [Options] [FILES]\n"\
+"Plot lines from tabulated ascii data (file or standard input)\n"\
+"if no FILES are specified the script reads from standard input\n"\
+"FILES may also contain one instance of '-' (standard input)"
parser=OptionParser(usage)
parser.add_option("-s","--skip",metavar="NSKIP",default=0,type="int",help="skip NSKIP header lines in the input file(s)")
parser.add_option("-l","--legend",type="string",metavar="LEGEND",help="Make a legend for the plot by specifying LEGEND as: Curve1/Another Curve/..")
parser.add_option('-c','--columns',type="string",metavar="COLS",help="only print specific columns from the files e.g. COLS: 2/6/8, the default prints all columns in all files")
parser.add_option('-x',"--xlabel",type="string",help="Specify a label to put on the x axis")
parser.add_option('-y',"--ylabel",type="string",help="Specify a label to put on the y axis")
parser.add_option('-t',"--title",type="string",help="Add a title to the plot")
    parser.add_option('-o',"--output", metavar="IMAGE", type="string",help="Output the plot to an image rather than a dynamic viewer. Suffixes (e.g. .pdf, .eps, .svg, .png) are automatically detected from IMAGE but must be supported by the matplotlib backend")
parser.add_option('--transparency',action="store_true",help="Set the background to be transparent")
parser.add_option('-m',"--multiply",type="string",metavar="SCALE1/SCALE2/..", help="multiply the Y columns with SCALE1/SCALE2/..")
#parser.add_option('--mat',type="string",default="NOMAT",metavar="VAR",help="Input file is a matlab file. Plot the data from matrixvariable VAR)")
#parser.add_option('--listmat',action="store_true",help="List available variables from the input matlab files")
parser.add_option('-a',"--aspect",type="float",help="Set aspect ratio of the data")
parser.add_option('--size',type="string",metavar='(width,height)',default='(32,18)',help='Set the figure size (in cm)')
parser.add_option("--xlim",type="string",metavar="XSTART/XEND", help="Set the limits of the X axis")
parser.add_option("--ylim",type="string",metavar="YSTARTLEFT/YENDLEFT[/YSTARTRIGHT/YENDRIGHT]", help="Set the limits of the Y axis")
parser.add_option('-g',"--grid",action="store_true",help="show grid on the plot")
    parser.add_option('--twin',type="string",metavar='L/R/R/...',help="Create a plot with 2 Y axis systems, and assign each column to either the left (L) or right (R) axis")
parser.add_option('--mean',action="store_true",help="remove the mean from the time series before plotting")
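    # Example invocation (file name purely illustrative):
    #   speedplot.py -c 2/3 -l "raw/filtered" --twin L/R series.txt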
(options, args) = parser.parse_args()
fids=[]
matlab=False
#if options.mat != "NOMAT" or options.listmat:
# matlab=True
#if matlab and not args:
# print("Sorry: Matlab files cannot be read from standard input",file=sys.stderr)
# sys.exit(1)
#if matlab and len(args)!=1:
# print("Only 1 input file allowed for matlab data",file=sys.stderr)
# sys.exit(1)
# if options.listmat:
# tmp=sio.whosmat(args[0])
# for var in tmp:
# print("variable %s"%var[0],file=sys.stdout,end="")
# print(var[1],file=sys.stdout)
# sys.exit(0)
#read in file(s)
if not args:
#read from standard input
nfiles=1
fids.append(sys.stdin)
elif not matlab:
#assume the remaining input arguments are files
for f in args:
if f == "-":
fids.append(sys.stdin)
else:
fids.append(open(f,'r'))
data=[]
xdat=[]
for fid in fids:
#possibly skip some lines
for i in range(options.skip):
fid.readline()
datatmp=[]
xdattmp=[]
for ln in fid.readlines():
fspl=ln.split()
xdattmp.append(float(fspl[0]))
#append the rest
datatmp.append([float(i) for i in fspl[1:]])
#append the data to the collection as a numpy array
xdat.append(np.array(xdattmp))
data.append(np.array(datatmp))
#close the file
fid.close()
if options.columns:
columns=[int(i)-2 for i in options.columns.split('/')]
else:
        #by default plot all data columns (file columns 2 onwards, up to 200)
columns=[i for i in range(200)]
#remove the mean from all columns
if options.mean:
for i in range(len(data)):
data[i]-=data[i].mean(axis=0)
if options.legend:
labels=options.legend.split('/')
label=iter(labels)
else:
labels=[]
if options.multiply:
#parse input parameter
if options.multiply.count('/') == 0:
scales=iter([float(options.multiply) for i in range(100)])
else:
scales=iter([float(i) for i in options.multiply.split('/')])
for i in range(len(fids)):
for col in range(data[i].shape[1]):
if not col in columns:
continue
try:
data[i][:,col]*=next(scales)
except StopIteration:
print("Sorry: run out of scale factors",file=sys.stderr)
sys.exit(1)
#do some plotting
    #convert figure size from cm to inches
fsize=tuple(x/2.54 for x in make_tuple(options.size))
fig=plt.figure(figsize=fsize)
axislr={}
axislr['L']=fig.gca()
if options.twin:
twinaxis=[i for i in options.twin.split('/')]
axislr['R']=axislr['L'].twinx()
#shift the color cycle of the twin axis
cyc=axislr['R']._get_lines.prop_cycler
[next(cyc) for i in range(0,twinaxis.count('L'))]
else:
twinaxis='L'*200
# print(columns)
axit=iter(twinaxis)
for i in range(len(fids)):
for col in range(data[i].shape[1]):
if not col in columns:
continue
if labels:
try:
axislr[next(axit)].plot(xdat[i],data[i][:,col],label=next(label))
except StopIteration:
print("Sorry: run out of labels for the legend",file=sys.stderr)
sys.exit(1)
else:
# LR=next(axit)
axislr[next(axit)].plot(xdat[i],data[i][:,col])
#add axis labels
if options.xlabel:
plt.xlabel(options.xlabel)
if options.ylabel:
if options.twin:
ylab=options.ylabel.split('/')
axislr['L'].set_ylabel(ylab[0])
if len(ylab)==2:
axislr['R'].set_ylabel(ylab[1])
else:
axislr['L'].set_ylabel(options.ylabel)
#add title
if options.title:
plt.title(options.title)
#create a legend
if labels:
if options.twin:
axislr['L'].legend(loc='upper left')
axislr['R'].legend(loc='upper right')
else:
axislr['L'].legend(loc='best')
    #possibly set aspect ratio
if options.aspect:
axislr['L'].set_aspect(options.aspect)
if options.twin:
axislr['R'].set_aspect(options.aspect)
#Possibly set axis limits
if options.xlim:
xlims=[ float(x) for x in options.xlim.split('/')]
axislr['L'].set_xlim(xlims[0],xlims[1])
if options.ylim:
ylims=[float(x) for x in options.ylim.split('/')]
if options.twin:
axislr['L'].set_ylim(ylims[0],ylims[1])
axislr['R'].set_ylim(ylims[2],ylims[3])
else:
axislr['L'].set_ylim(ylims[0],ylims[1])
if options.grid:
plt.grid()
plt.tight_layout()
#print or show the figure
if options.output:
plt.savefig(options.output,bbox_inches='tight',transparent=options.transparency)
else:
plt.show()
if __name__ == "__main__":
main(sys.argv)
| mit |
cython-testbed/pandas | pandas/tests/arrays/categorical/test_indexing.py | 2 | 5273 | # -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas.util.testing as tm
from pandas import Categorical, Index, CategoricalIndex, PeriodIndex, Series
import pandas.core.common as com
from pandas.tests.arrays.categorical.common import TestCategorical
class TestCategoricalIndexingWithFactor(TestCategorical):
def test_getitem(self):
assert self.factor[0] == 'a'
assert self.factor[-1] == 'c'
subf = self.factor[[0, 1, 2]]
tm.assert_numpy_array_equal(subf._codes,
np.array([0, 1, 1], dtype=np.int8))
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_numpy_array_equal(subf._codes,
np.array([2, 2, 2], dtype=np.int8))
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
assert c[0] == 'b'
c[-1] = 'a'
assert c[-1] == 'a'
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical(['c', 'b', 'b', 'a', 'a', 'c', 'c', 'c'],
ordered=True)
tm.assert_categorical_equal(c, expected)
class TestCategoricalIndexing(object):
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
tm.assert_numpy_array_equal(result, expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.int8)
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
tm.assert_numpy_array_equal(cat1._codes, exp_arr)
tm.assert_index_equal(cat1.categories, exp_idx)
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.int8)
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
tm.assert_numpy_array_equal(cat2._codes, exp_arr)
tm.assert_index_equal(cat2.categories, exp_idx2)
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype=np.int8)
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
tm.assert_numpy_array_equal(cat3._codes, exp_arr)
tm.assert_index_equal(cat3.categories, exp_idx)
def test_categories_assigments(self):
s = Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1], dtype=np.int64)
s.categories = [1, 2, 3]
tm.assert_numpy_array_equal(s.__array__(), exp)
tm.assert_index_equal(s.categories, Index([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
pytest.raises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
pytest.raises(ValueError, f)
# Combinations of sorted/unique:
@pytest.mark.parametrize("idx_values", [[1, 2, 3, 4], [1, 3, 2, 4],
[1, 3, 3, 4], [1, 2, 2, 4]])
# Combinations of missing/unique
@pytest.mark.parametrize("key_values", [[1, 2], [1, 5], [1, 1], [5, 5]])
@pytest.mark.parametrize("key_class", [Categorical, CategoricalIndex])
def test_get_indexer_non_unique(self, idx_values, key_values, key_class):
# GH 21448
key = key_class(key_values, categories=range(1, 5))
# Test for flat index and CategoricalIndex with same/different cats:
for dtype in None, 'category', key.dtype:
idx = Index(idx_values, dtype=dtype)
expected, exp_miss = idx.get_indexer_non_unique(key_values)
result, res_miss = idx.get_indexer_non_unique(key)
tm.assert_numpy_array_equal(expected, result)
tm.assert_numpy_array_equal(exp_miss, res_miss)
@pytest.mark.parametrize("index", [True, False])
def test_mask_with_boolean(index):
s = Series(range(3))
idx = Categorical([True, False, True])
if index:
idx = CategoricalIndex(idx)
assert com.is_bool_indexer(idx)
result = s[idx]
expected = s[idx.astype('object')]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("index", [True, False])
def test_mask_with_boolean_raises(index):
s = Series(range(3))
idx = Categorical([True, False, None])
if index:
idx = CategoricalIndex(idx)
with tm.assert_raises_regex(ValueError, 'NA / NaN'):
s[idx]
| bsd-3-clause |
heiko-r/paparazzi | sw/airborne/test/ahrs/ahrs_utils.py | 86 | 4923 | #! /usr/bin/env python
# Copyright (C) 2011 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import print_function
import subprocess
import numpy as np
import matplotlib.pyplot as plt
def run_simulation(ahrs_type, build_opt, traj_nb):
print("\nBuilding ahrs")
args = ["make", "clean", "run_ahrs_on_synth", "AHRS_TYPE=AHRS_TYPE_" + ahrs_type] + build_opt
#print(args)
p = subprocess.Popen(args=args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
outputlines = p.stdout.readlines()
p.wait()
for i in outputlines:
print(" # " + i, end=' ')
print()
print("Running simulation")
print(" using traj " + str(traj_nb))
p = subprocess.Popen(args=["./run_ahrs_on_synth", str(traj_nb)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=False)
outputlines = p.stdout.readlines()
p.wait()
# for i in outputlines:
# print(" "+i, end=' ')
# print("\n")
ahrs_data_type = [('time', 'float32'),
('phi_true', 'float32'), ('theta_true', 'float32'), ('psi_true', 'float32'),
('p_true', 'float32'), ('q_true', 'float32'), ('r_true', 'float32'),
('bp_true', 'float32'), ('bq_true', 'float32'), ('br_true', 'float32'),
('phi_ahrs', 'float32'), ('theta_ahrs', 'float32'), ('psi_ahrs', 'float32'),
('p_ahrs', 'float32'), ('q_ahrs', 'float32'), ('r_ahrs', 'float32'),
('bp_ahrs', 'float32'), ('bq_ahrs', 'float32'), ('br_ahrs', 'float32')]
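    # One named float32 field per column of the simulator's text output:
    # the true attitude/rates/biases followed by the AHRS estimates.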
mydescr = np.dtype(ahrs_data_type)
data = [[] for dummy in xrange(len(mydescr))]
# import code; code.interact(local=locals())
for line in outputlines:
if line.startswith("#"):
print(" " + line, end=' ')
else:
fields = line.strip().split(' ')
#print(fields)
for i, number in enumerate(fields):
data[i].append(number)
print()
for i in xrange(len(mydescr)):
data[i] = np.cast[mydescr[i]](data[i])
return np.rec.array(data, dtype=mydescr)
def plot_simulation_results(plot_true_state, lsty, label, sim_res):
print("Plotting Results")
# f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
plt.subplot(3, 3, 1)
plt.plot(sim_res.time, sim_res.phi_ahrs, lsty, label=label)
plt.ylabel('degres')
plt.title('phi')
plt.legend()
plt.subplot(3, 3, 2)
plt.plot(sim_res.time, sim_res.theta_ahrs, lsty)
plt.title('theta')
plt.subplot(3, 3, 3)
plt.plot(sim_res.time, sim_res.psi_ahrs, lsty)
plt.title('psi')
plt.subplot(3, 3, 4)
plt.plot(sim_res.time, sim_res.p_ahrs, lsty)
plt.ylabel('degres/s')
plt.title('p')
plt.subplot(3, 3, 5)
plt.plot(sim_res.time, sim_res.q_ahrs, lsty)
plt.title('q')
plt.subplot(3, 3, 6)
plt.plot(sim_res.time, sim_res.r_ahrs, lsty)
plt.title('r')
plt.subplot(3, 3, 7)
plt.plot(sim_res.time, sim_res.bp_ahrs, lsty)
plt.ylabel('degres/s')
plt.xlabel('time in s')
plt.title('bp')
plt.subplot(3, 3, 8)
plt.plot(sim_res.time, sim_res.bq_ahrs, lsty)
plt.xlabel('time in s')
plt.title('bq')
plt.subplot(3, 3, 9)
plt.plot(sim_res.time, sim_res.br_ahrs, lsty)
plt.xlabel('time in s')
plt.title('br')
if plot_true_state:
plt.subplot(3, 3, 1)
plt.plot(sim_res.time, sim_res.phi_true, 'r--')
plt.subplot(3, 3, 2)
plt.plot(sim_res.time, sim_res.theta_true, 'r--')
plt.subplot(3, 3, 3)
plt.plot(sim_res.time, sim_res.psi_true, 'r--')
plt.subplot(3, 3, 4)
plt.plot(sim_res.time, sim_res.p_true, 'r--')
plt.subplot(3, 3, 5)
plt.plot(sim_res.time, sim_res.q_true, 'r--')
plt.subplot(3, 3, 6)
plt.plot(sim_res.time, sim_res.r_true, 'r--')
plt.subplot(3, 3, 7)
plt.plot(sim_res.time, sim_res.bp_true, 'r--')
plt.subplot(3, 3, 8)
plt.plot(sim_res.time, sim_res.bq_true, 'r--')
plt.subplot(3, 3, 9)
plt.plot(sim_res.time, sim_res.br_true, 'r--')
def show_plot():
plt.show()
| gpl-2.0 |
YinongLong/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initializations strategies to make
the algorithm convergence robust as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of widely spaced isotropic
Gaussian clusters.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (with randomly generated datasets) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
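# Each case is (estimator class, init strategy, extra constructor params);
# the MiniBatchKMeans variants add early-stopping / init-size settings.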
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
clarkchen/leetcode_python | contests/week185/2.py | 1 | 1822 | from typing import List
from collections import OrderedDict
# import pandas as pd
class Solution:
    def displayTable(self, orders: List[List[str]]) -> List[List[str]]:
        """Aggregate orders into a restaurant display table.
        :param orders: list of [customerName, tableNumber, foodItem] entries,
                       e.g. ["David", "3", "Ceviche"]
        :return: header row ["Table", <food names sorted>] followed by one
                 row per table (in numeric order) with the count of each food
        """
ret = OrderedDict()
food_list = []
table_list = []
for order in orders:
name, table, food = order[0], int(order[1]),order[2]
ret[table] = ret.get(table, OrderedDict())
ret[table][food] = ret[table].get(food, 0)+1
food_list.append(food)
table_list.append(table)
food_list = list(set(food_list))
table_list = list(set(table_list))
food_list.sort()
table_list.sort()
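        # Table numbers were stored as ints, so they sort numerically;
        # food names sort lexicographically, matching the required order.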
result = []
food_list.insert(0, 'Table')
result.append(food_list)
for table in table_list:
temp = [f"{table}"]
for food in food_list[1:]:
temp.append(f"{ret[table].get(food, 0)}")
result.append(temp)
# print(ret)
return result
if __name__ == '__main__':
s = Solution()
ans = s.displayTable([["David","3","Ceviche"],["Corina","10","Beef Burrito"],["David","3","Fried Chicken"],["Carla","5","Water"],["Carla","5","Ceviche"],["Rous","3","Ceviche"]])
print(ans)
#
orders = [["James", "12", "Fried Chicken"], ["Ratesh", "12", "Fried Chicken"],
["Amadeus", "12", "Fried Chicken"], ["Adam", "1", "Canadian Waffles"],
["Brianna", "1", "Canadian Waffles"]]
ans = s.displayTable(orders)
print(ans)
orders = [["Laura", "2", "Bean Burrito"], ["Jhon", "2", "Beef Burrito"], ["Melissa", "2", "Soda"], ["Laura", "2", "Bean Burrito"]]
ans = s.displayTable(orders)
print(ans) | apache-2.0 |
arjoly/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
| bsd-3-clause |
cl4rke/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
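# Generative model: y = X w + e with w_i ~ N(0, 1/lambda_) on the relevant
# features and e ~ N(0, 1/alpha_); BayesianRidge estimates both precisions
# together with the weights.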
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
ThomasMiconi/htmresearch | projects/union_pooling/experiments/union_sdr_continuous/union_pooling_tm_learning.py | 8 | 9165 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv
import sys
import time
import os
import yaml
from optparse import OptionParser
import numpy
from pylab import rcParams
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.backends.backend_pdf import PdfPages
plt.ion()
from nupic.data.generators.pattern_machine import PatternMachine
from nupic.data.generators.sequence_machine import SequenceMachine
from nupic.algorithms.monitor_mixin.monitor_mixin_base import MonitorMixinBase
from htmresearch.frameworks.union_temporal_pooling.union_temporal_pooler_experiment import (
UnionTemporalPoolerExperiment)
"""
Experiment 2
Runs UnionTemporalPooler on input from a Temporal Memory while the TM learns the sequences
"""
def experiment2():
paramDir = 'params/1024_baseline/5_trainingPasses.yaml'
outputDir = 'results/'
params = yaml.safe_load(open(paramDir, 'r'))
options = {'plotVerbosity': 2, 'consoleVerbosity': 2}
plotVerbosity = 2
consoleVerbosity = 1
print "Running SDR overlap experiment...\n"
print "Params dir: {0}".format(paramDir)
print "Output dir: {0}\n".format(outputDir)
# Dimensionality of sequence patterns
patternDimensionality = params["patternDimensionality"]
# Cardinality (ON / true bits) of sequence patterns
patternCardinality = params["patternCardinality"]
# TODO If this parameter is to be supported, the sequence generation code
# below must change
# Number of unique patterns from which sequences are built
# patternAlphabetSize = params["patternAlphabetSize"]
# Length of sequences shown to network
sequenceLength = params["sequenceLength"]
# Number of sequences used. Sequences may share common elements.
numberOfSequences = params["numberOfSequences"]
# Number of sequence passes for training the TM. Zero => no training.
trainingPasses = params["trainingPasses"]
tmParamOverrides = params["temporalMemoryParams"]
upParamOverrides = params["unionPoolerParams"]
# Generate a sequence list and an associated labeled list (both containing a
# set of sequences separated by None)
start = time.time()
print "\nGenerating sequences..."
patternAlphabetSize = sequenceLength * numberOfSequences
patternMachine = PatternMachine(patternDimensionality, patternCardinality,
patternAlphabetSize)
sequenceMachine = SequenceMachine(patternMachine)
numbers = sequenceMachine.generateNumbers(numberOfSequences, sequenceLength)
generatedSequences = sequenceMachine.generateFromNumbers(numbers)
sequenceLabels = [str(numbers[i + i*sequenceLength: i + (i+1)*sequenceLength])
for i in xrange(numberOfSequences)]
labeledSequences = []
for label in sequenceLabels:
for _ in xrange(sequenceLength):
labeledSequences.append(label)
labeledSequences.append(None)
# Set up the Temporal Memory and Union Pooler network
print "\nCreating network..."
experiment = UnionTemporalPoolerExperiment(tmParamOverrides, upParamOverrides)
# Train only the Temporal Memory on the generated sequences
# if trainingPasses > 0:
#
# print "\nTraining Temporal Memory..."
# if consoleVerbosity > 0:
# print "\nPass\tBursting Columns Mean\tStdDev\tMax"
#
# for i in xrange(trainingPasses):
# experiment.runNetworkOnSequences(generatedSequences,
# labeledSequences,
# tmLearn=True,
# upLearn=None,
# verbosity=consoleVerbosity,
# progressInterval=_SHOW_PROGRESS_INTERVAL)
#
# if consoleVerbosity > 0:
# stats = experiment.getBurstingColumnsStats()
# print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2])
#
# # Reset the TM monitor mixin's records accrued during this training pass
# # experiment.tm.mmClearHistory()
#
# print
# print MonitorMixinBase.mmPrettyPrintMetrics(
# experiment.tm.mmGetDefaultMetrics())
# print
#
# if plotVerbosity >= 2:
# plotNetworkState(experiment, plotVerbosity, trainingPasses, phase="Training")
#
# experiment.tm.mmClearHistory()
# experiment.up.mmClearHistory()
print "\nRunning test phase..."
inputSequences = generatedSequences
inputCategories = labeledSequences
tmLearn = True
upLearn = False
classifierLearn = False
currentTime = time.time()
experiment.tm.reset()
experiment.up.reset()
poolingActivationTrace = numpy.zeros((experiment.up._numColumns, 1))
activeCellsTrace = numpy.zeros((experiment.up._numColumns, 1))
activeSPTrace = numpy.zeros((experiment.up._numColumns, 1))
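  # Each trace gains one column per timestep: pooling persistence, the
  # union SDR and the spatial-pooler SDR respectively.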
for _ in xrange(trainingPasses):
for i in xrange(len(inputSequences)):
sensorPattern = inputSequences[i]
inputCategory = inputCategories[i]
if sensorPattern is None:
pass
else:
experiment.tm.compute(sensorPattern,
learn=tmLearn,
sequenceLabel=inputCategory)
if upLearn is not None:
activeCells, predActiveCells, burstingCols, = experiment.getUnionTemporalPoolerInput()
experiment.up.compute(activeCells,
predActiveCells,
learn=upLearn,
sequenceLabel=inputCategory)
currentPoolingActivation = experiment.up._poolingActivation
currentPoolingActivation = experiment.up._poolingActivation.reshape((experiment.up._numColumns, 1))
poolingActivationTrace = numpy.concatenate((poolingActivationTrace, currentPoolingActivation), 1)
currentUnionSDR = numpy.zeros((experiment.up._numColumns, 1))
currentUnionSDR[experiment.up._unionSDR] = 1
activeCellsTrace = numpy.concatenate((activeCellsTrace, currentUnionSDR), 1)
currentSPSDR = numpy.zeros((experiment.up._numColumns, 1))
currentSPSDR[experiment.up._activeCells] = 1
activeSPTrace = numpy.concatenate((activeSPTrace, currentSPSDR), 1)
print "\nPass\tBursting Columns Mean\tStdDev\tMax"
stats = experiment.getBurstingColumnsStats()
print "{0}\t{1}\t{2}\t{3}".format(0, stats[0], stats[1], stats[2])
print
print MonitorMixinBase.mmPrettyPrintMetrics(\
experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics())
print
experiment.tm.mmClearHistory()
# estimate fraction of shared bits across adjacent time point
unionSDRshared = experiment.up._mmComputeUnionSDRdiff()
bitLifeList = experiment.up._mmComputeBitLifeStats()
bitLife = numpy.array(bitLifeList)
# Plot SP outputs, UP persistence and UP outputs in testing phase
def showSequenceStartLine(ax, trainingPasses, sequenceLength):
for i in xrange(trainingPasses):
ax.vlines(i*sequenceLength, 0, 100, linestyles='--')
plt.figure()
ncolShow = 100
f, (ax1, ax2, ax3) = plt.subplots(nrows=1,ncols=3)
ax1.imshow(activeSPTrace[1:ncolShow,:], cmap=cm.Greys,interpolation="nearest",aspect='auto')
showSequenceStartLine(ax1, trainingPasses, sequenceLength)
ax1.set_title('SP SDR')
ax1.set_ylabel('Columns')
ax2.imshow(poolingActivationTrace[1:100,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
showSequenceStartLine(ax2, trainingPasses, sequenceLength)
ax2.set_title('Persistence')
ax3.imshow(activeCellsTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
showSequenceStartLine(ax3, trainingPasses, sequenceLength)
plt.title('Union SDR')
ax2.set_xlabel('Time (steps)')
pp = PdfPages('results/UnionPoolingDuringTMlearning_Experiment2.pdf')
pp.savefig()
pp.close()
f, (ax1, ax2, ax3) = plt.subplots(nrows=3,ncols=1)
ax1.plot((sum(activeCellsTrace))/experiment.up._numColumns*100)
ax1.set_ylabel('Union SDR size (%)')
ax1.set_xlabel('Time (steps)')
ax1.set_ylim(0,25)
ax2.plot(unionSDRshared)
ax2.set_ylabel('Shared Bits')
ax2.set_xlabel('Time (steps)')
ax3.hist(bitLife)
ax3.set_xlabel('Life duration for each bit')
pp = PdfPages('results/UnionSDRproperty_Experiment2.pdf')
pp.savefig()
pp.close()
if __name__ == "__main__":
experiment2()
| agpl-3.0 |
karoraw1/GLM_Wrapper | bin/parseConfigPickles.py | 1 | 9741 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 18 12:28:36 2016
@author: login
"""
import cPickle, os, sys
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import statsmodels.api as sm
import pandas as pd
def parseConfigPickles(dir_list, prefix_list, verbose=True):
rootPath = os.path.dirname(os.getcwd())
caseBase = os.path.join(rootPath, 'glm_case_folders')
    if not os.path.exists(caseBase):
        sys.exit("\nBase Directory Not Detected")
    elif verbose:
        print "\nBase Directory Detected"
caseDirs = [os.path.join(caseBase, i) for i in dir_list]
cases = []
for i in caseDirs:
        if not os.path.exists(i):
            print "\nCase Directory {} Not Detected".format(i)
        elif verbose:
            print "\nCase Directory {} Detected".format(i)
counter = 0
for j in os.listdir(i):
for k in prefix_list:
if k in j and 'pickle' in j:
counter+=1
pickle_path = os.path.join(i, j)
cases.append(pickle_path)
if verbose:
print "\t{}, ({}) pickle detected".format(j, os.path.getsize(pickle_path))
uniq_cases = list(set(cases))
if verbose:
print "\n{} total cases detected".format(len(cases))
print "Reduced to {} after dereplicaion".format(len(uniq_cases))
compiled_runs = {}
all_bad_lakes = []
master_counter = 0
for l in uniq_cases:
f = open(l, 'rb')
bstps_results, bad_lakes = cPickle.load(f)
all_bad_lakes+= bad_lakes
f.close()
for m in bstps_results['config'].keys():
master_counter += 1
compiled_runs[master_counter] = (bstps_results['config'][m],
bstps_results['error'][m])
return compiled_runs, all_bad_lakes
optimized = ["Kw", "min_layer_vol", "min_layer_thick", "max_layer_thick",
"coef_mix_conv", "coef_wind_stir", "coef_mix_shear",
"coef_mix_turb", "coef_mix_KH", "coef_mix_hyp", "bsn_len",
"bsn_wid", "H", "the_sals", "bsn_len_outl", "bsn_wid_outl",
"outflow_factor", "coef_inf_entrain", "strmbd_drag",
"strmbd_slope", "strm_hf_angle", "rain_threshold", "runoff_coef",
"wind_factor", "rain_factor", "at_factor", "rh_factor",
"sw_factor", "lw_factor", "cd", "ce", "ch"]
prefix_list = ['run3_results']
dir_list = ['randomize_2']
result, bad_lakes = parseConfigPickles(dir_list, prefix_list)
errorList = np.array([n[1] for n in result.values()])
print "{} individual simulations detected".format(len(errorList))
configList = [o[0] for o in result.values()]
plt.figure(1)
plt.xlabel("Nash Sutcliffe Efficiency")
plt.ylabel('No. simulations (n)')
plt.hist(errorList, bins=80, color='g')
sortedErr = sorted(errorList)
pct_denom = len(sortedErr)
idx_cutoffs = [int(pct_denom*0.95), int(pct_denom*0.99)]
err_cutoffs = [sortedErr[i] for i in idx_cutoffs]
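# NSE values at the 95th and 99th percentiles; the 95th-percentile cutoff
# later separates "behavioural" from "non-behavioural" runs.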
for i in err_cutoffs:
denom = len(errorList)
numer = float((errorList > i).sum())
frac_above = (int((numer/denom)*1000.)/1000.)*100.
print "{}% of runs had NSE above {}".format(frac_above, i)
error_value_rels = {}
hsy_parsed = {}
for _, p in configList[0].items():
for q in p.keys():
hsy_parsed[q] = {'behavioural':[], 'non-behavioural':[], 'breakers':[]}
error_value_rels[q] = {'value':[], 'error':[]}
bl_A = np.array([85110.3, 163322.5, 222719.2, 283600.9, 372408.3,
436785.8, 506855.8, 577283.3, 616056.1])
bl_H = np.array([0.132, 4.055, 8.112, 12.169, 16.227, 20.284, 24.339,
28.396, 32.454])
bl_sals = np.array([197.6, 395.16, 592.74, 790.32, 988., 1185.48])
bl_temps = np.array([5.04, 5.04, 5.04, 5.04, 5.04, 5.04])
listVars = ['the_temps', 'the_sals', 'A', 'H' ]
listVals = [bl_temps, bl_sals, bl_A, bl_H]
baselineLists = {t:u for t, u in zip(listVars, listVals)}
sig_params = ['A', 'max_layer_thick', 'the_sals', 'H']
run_id = range(len(errorList))
x = np.zeros((len(errorList), len(optimized)))
y = np.zeros((len(errorList),))
for config, error, row_l in zip(configList, errorList, run_id):
#load config & error
for r in config.keys():
param_block = config[r]
for param, value in param_block.items():
if param in listVars:
multArray = (np.array(value)/baselineLists[param]).round(decimals=1)
this_val = round(np.median(multArray),1)
else:
this_val = value
if error > err_cutoffs[0]:
hsy_parsed[param]['behavioural'].append(this_val)
if param in sig_params:
error_value_rels[param]['error'].append(error)
error_value_rels[param]['value'].append(this_val)
elif error <= err_cutoffs[0]:
hsy_parsed[param]['non-behavioural'].append(this_val)
for idx, param2 in enumerate(optimized):
for r in config.keys():
param_block = config[r]
if param2 in param_block.keys():
new_val = param_block[param2]
if param2 in listVars:
multArray = (np.array(new_val)/baselineLists[param2]).round(decimals=1)
this_val = round(np.median(multArray),1)
else:
this_val = new_val
x[row_l, idx] = this_val
y[row_l] = error
else:
pass
from sklearn import preprocessing
from sklearn import linear_model
x_scaled = preprocessing.scale(x)
clf = linear_model.Ridge()
clf.fit(x_scaled, y*100)
coeffs = pd.DataFrame(index = optimized, data = clf.coef_)
coeffs.sort_values(0, inplace=True, ascending=False)
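# Ridge coefficients on the standardised parameters give a rough ranking of
# how strongly each parameter shifts the NSE score.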
for bL in bad_lakes:
for r in bL.keys():
param_block = bL[r]
for param, value in param_block.items():
if param in listVars:
multArray = (np.array(value)/baselineLists[param]).round(decimals=1)
this_val = round(np.median(multArray),1)
else:
this_val = value
hsy_parsed[param]['breakers'].append(this_val)
def HSY_Sensitivity(group1, group2):
full_set = group1 + group2
x = np.linspace(min(full_set), max(full_set))
ecdf_1 = sm.distributions.ECDF(group1)
ecdf_2 = sm.distributions.ECDF(group2)
cumdist_1 = ecdf_1(x)
cumdist_2 = ecdf_2(x)
ks_stat, p_val = stats.ks_2samp(cumdist_1, cumdist_2)
return x, cumdist_2, cumdist_1, p_val
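# HSY-style (regional) sensitivity analysis: a parameter is flagged as
# sensitive when the KS test separates the two groups' CDFs (p < 0.05 below).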
breaker_ps = {}
p_vals = {}
sig_distributions = {}
breakers_sigs = {}
for param in hsy_parsed.keys():
if param in optimized:
behavioural = hsy_parsed[param]['behavioural']
non_behavioural = hsy_parsed[param]['non-behavioural']
breakers = hsy_parsed[param]['breakers']
p_vals[param] = HSY_Sensitivity(behavioural, non_behavioural)
breaker_ps[param]= HSY_Sensitivity(non_behavioural+non_behavioural, breakers)
if p_vals[param][3] < 0.05:
sig_distributions[param] = (p_vals[param][3],
p_vals[param][0],
p_vals[param][1],
p_vals[param][2])
if breaker_ps[param][3] < 0.05:
breakers_sigs[param] = (breaker_ps[param][3],
breaker_ps[param][0],
breaker_ps[param][1],
breaker_ps[param][2])
"""
plt.figure(2)
plt.ylabel("Probability")
ax1 = plt.subplot(211)
plt.title("Depth Profile Scalar")
plt.plot(sig_distributions['H'][1], sig_distributions['H'][2],
label="non-behavioural")
plt.plot(sig_distributions['H'][1], sig_distributions['H'][3],
label="behavioural")
plt.legend(loc='upper left')
# share x and y
ax2 = plt.subplot(212)
plt.title("Salinitiy Profile Scalar")
plt.plot(sig_distributions['the_sals'][1], sig_distributions['the_sals'][2])
plt.plot(sig_distributions['the_sals'][1], sig_distributions['the_sals'][3])
plt.show()
sys.exit()
plt.figure(3, figsize=(9,9))
ax1 = plt.subplot(211)
plt.plot(breakers_sigs['max_layer_thick'][1], breakers_sigs['max_layer_thick'][2],
label="Invalid")
plt.plot(breakers_sigs['max_layer_thick'][1], breakers_sigs['max_layer_thick'][3],
label="Valid")
plt.legend(loc='upper left')
plt.title("Maximum Layer Thickness")
# share x and y
ax2 = plt.subplot(212)
plt.plot(breakers_sigs['A'][1], breakers_sigs['A'][2])
plt.plot(breakers_sigs['A'][1], breakers_sigs['A'][3])
plt.xlim([0.5, 1.6])
plt.ylabel("Probability")
plt.title("Area Profile Scalar")
ax3 = plt.subplot(313)
plt.plot(breakers_sigs['H'][1], breakers_sigs['H'][2])
plt.plot(breakers_sigs['H'][1], breakers_sigs['H'][3])
plt.xlim([1.0, 1.7])
plt.title("Depth Profile Scalar")
plt.xlabel("Parameter Value")
plt.figure(4)
ax1 = plt.subplot(122)
plt.scatter(error_value_rels['the_sals']['error'],
error_value_rels['the_sals']['value'] )
hline = plt.ylim()
plt.plot(np.ones(2)*0.858, hline, c='r')
plt.title("Salinity Profile Scalar")
plt.xlabel("Nash-Sutcliffe Efficiency")
plt.ylim(hline)
ax2 = plt.subplot(121)
plt.scatter(error_value_rels['H']['error'],
error_value_rels['H']['value'] )
hline = plt.ylim()
plt.plot(np.ones(2)*0.858, hline, c='r')
plt.title("Depth Profile Scalar")
plt.ylim(hline)
plt.xlabel("Nash-Sutcliffe Efficiency")
plt.ylabel("Parameter Value")
""" | mit |
liberatorqjw/scikit-learn | sklearn/mixture/tests/test_gmm.py | 24 | 12725 | import unittest
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
rng = np.random.RandomState(0)
def test_sample_gaussian():
"""
Test sample generation from mixture.sample_gaussian where covariance
is diagonal, spherical and full
"""
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
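    # For a diagonal covariance the joint log-density factorises, so each
    # component's log-pdf is the sum of per-dimension 1-D normal log-pdfs.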
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
"""
test a slow and naive implementation of lmvnpdf and
compare it to the vectorized version (mixture.lmvnpdf) to test
for correctness
"""
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
# covariances before fitting There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
""" Train on degenerate data with 0 in some dimensions
"""
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
""" Train on 1-D data
"""
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
#X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
"""Test that multiple inits does not much worse than a single one"""
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
"""Test that the right number of parameters is estimated"""
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
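def _example_parameter_count_arithmetic():
    # Illustrative sketch (not collected as a test): where the expected counts
    # in test_n_parameters come from for n_components=2 and n_dim=5. Means
    # contribute 2 * 5 values, the weights contribute n_components - 1 free
    # values, and the covariance term depends on the parameterisation.
    n_components, n_dim = 2, 5
    mean_params = n_components * n_dim                               # 10
    weight_params = n_components - 1                                 # 1
    cov_params = {'spherical': n_components,                         # 2
                  'diag': n_components * n_dim,                      # 10
                  'tied': n_dim * (n_dim + 1) // 2,                  # 15
                  'full': n_components * n_dim * (n_dim + 1) // 2}   # 30
    totals = dict((k, mean_params + weight_params + v)
                  for k, v in cov_params.items())
    assert totals == {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}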
def test_aic():
""" Test the aic and bic criteria"""
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
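def _example_aic_bic_definitions(log_likelihood, n_parameters, n_samples):
    # Illustrative sketch (not called by the tests): the textbook criteria that
    # test_aic checks against. The test approximates the -2 * logL term by
    # 2 * n_samples * SGH * n_dim, SGH being an entropy estimate for the
    # roughly standard-normal data it generates.
    aic = -2.0 * log_likelihood + 2.0 * n_parameters
    bic = -2.0 * log_likelihood + n_parameters * np.log(n_samples)
    return aic, bic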
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
gabrielcnr/sinteglas | df.py | 1 | 4037 | from PyQt4.QtCore import *
from PyQt4.QtGui import *
import pandas as pd
import numpy as np
class Column(object):
def __init__(self, title, key, align='left', fmt='',
cell_style_callback=None):
self.title = title
self.key = key
self.align = align
self.fmt = fmt
if cell_style_callback is None:
cell_style_callback = self._default_cell_style_callback
self.cell_style_callback = cell_style_callback
def _default_cell_style_callback(self, array, index, role):
return None
align_map = {
'left': Qt.AlignVCenter | Qt.AlignLeft,
'center': Qt.AlignVCenter | Qt.AlignCenter,
'right': Qt.AlignVCenter | Qt.AlignRight,
}
DefaultColumn = Column(None, None, align='right', fmt='.1f')
GRID_FONT = QFont('Segoe UI')
GRID_FONT.setPixelSize(12)
HEADER_FONT = QFont('Segoe UI')
HEADER_FONT.setPixelSize(12)
HEADER_FONT_BOLD = QFont('Segoe UI')
HEADER_FONT_BOLD.setPixelSize(12)
HEADER_FONT_BOLD.setBold(True)
class Model(QAbstractTableModel):
def __init__(self, df, columns=None, parent=None):
super(Model, self).__init__(parent=parent)
self.df = df
self.row_count = len(df)
self.col_count = len(df.columns)
self.headers = list(df.columns)
self.array = df.as_matrix()
self.cols = {}
for col in columns or []:
if col.key in self.headers:
self.cols[self.headers.index(col.key)] = col
col.qt_align = align_map[col.align]
DefaultColumn.qt_align = align_map[DefaultColumn.align]
def rowCount(self, parent=None, *args, **kwargs):
return self.row_count
def columnCount(self, QModelIndex_parent=None, *args, **kwargs):
return self.col_count
def data(self, index, role=None):
if role == Qt.DisplayRole:
col = self.cols.get(index.column(), DefaultColumn)
return format(self.array[index.row(), index.column()], col.fmt)
elif role == Qt.TextAlignmentRole:
col = self.cols.get(index.column(), DefaultColumn)
return col.qt_align
elif role == Qt.FontRole:
return GRID_FONT
elif role == Qt.ForegroundRole:
col = self.cols.get(index.column(), DefaultColumn)
return col.cell_style_callback(self.array, index, role)
def headerData(self, index, orientation, role=None):
if orientation == Qt.Horizontal:
if role == Qt.DisplayRole:
col = self.cols.get(index)
if col is None:
return self.headers[index]
else:
return col.title
elif role == Qt.FontRole:
return HEADER_FONT_BOLD
else:
if role == Qt.DisplayRole:
return str(self.df.index[index])
elif role == Qt.TextAlignmentRole:
return Qt.AlignVCenter | Qt.AlignRight
elif role == Qt.FontRole:
return HEADER_FONT
BLUE = QColor(Qt.blue)
DARKCYAN = QColor(Qt.darkCyan)
RED = QColor(Qt.red)
def custom_cell_style(array, index, role):
if role == Qt.ForegroundRole:
value = array[index.row(), index.column()]
if value < 0.3:
return BLUE
elif 0.3 <= value < 0.8:
return DARKCYAN
else:
return RED
columns = [
Column('FOO', 'Column 2', align='center', fmt='.2f', cell_style_callback=custom_cell_style),
Column('BAR', 'Column 4', align='right', fmt='.4f', cell_style_callback=custom_cell_style),
]
app = QApplication([])
df = pd.DataFrame(np.random.rand(100000, 200),
columns=['Column %d' % i for i in xrange(1, 201)],
index=range(125, 100125))
model = Model(df, columns=columns)
view = QTableView()
view.setAlternatingRowColors(True)
view.verticalHeader().setResizeMode(QHeaderView.Fixed)
view.verticalHeader().setDefaultSectionSize(26)
view.setModel(model)
view.showMaximized()
app.exec_()
| mit |
facebookresearch/ParlAI | parlai/crowdsourcing/tasks/acute_eval/analysis.py | 1 | 31132 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
ACUTE-Eval Analyzer.
FOR ANALYSIS!!
"""
import hashlib
import json
import os
from copy import deepcopy
from datetime import datetime
from typing import Dict, Any, List, Optional
import numpy as np
import pandas as pd
from IPython.core.display import HTML
from mephisto.tools.data_browser import DataBrowser as MephistoDataBrowser
from mephisto.abstractions.databases.local_database import LocalMephistoDB
from mephisto.data_model.unit import Unit as MephistoUnit
from scipy.stats import binom_test
from parlai.core.params import ParlaiParser
from parlai.crowdsourcing.tasks.acute_eval.acute_eval_blueprint import (
BLUEPRINT_TYPE as ACUTE_EVAL_BLUEPRINT_TYPE,
)
from parlai.crowdsourcing.tasks.acute_eval.fast_acute_blueprint import (
FAST_ACUTE_BLUEPRINT_TYPE,
)
# To register the ACUTE-Eval and Fast ACUTE blueprints
from parlai.crowdsourcing.tasks.acute_eval.util import get_hashed_combo_path
_ = ACUTE_EVAL_BLUEPRINT_TYPE
_ = FAST_ACUTE_BLUEPRINT_TYPE
# TODO: blueprint type strings need to be imported here to register the blueprints -
# find a better way to scale up when there are many more subclassed ACUTE blueprints
# throw away turkers below this threshold
AGREEMENT_THRESHOLD = 0.8
# do we count ties as agreements?
AGREEMENT_TIES_OKAY = False
# NOTE: these could be added as flags if desired
def setup_args():
"""
Setup appropriate args.
"""
parser = ParlaiParser(False, False)
parser.add_argument(
'-ids',
'--run-ids',
type=str,
default=None,
help='Comma-separated list of run IDs to analyze',
)
parser.add_argument(
'--root-dir',
type=str,
default=None,
help='Optional root ACUTE-Eval save directory',
)
parser.add_argument(
'--outdir', type=str, default=None, help='Where to save the results'
)
parser.add_argument(
'--pairings-filepath',
type=str,
default=None,
help='Path to the ACUTE analysis pairs for the corresponding run id',
)
parser.add_argument(
'--mephisto-root',
type=str,
default=None,
help='Where to check for mephisto data (default own dir)',
)
parser.add_argument(
'--model-ordering',
type=str,
default=None,
help='Comma-separated list of models, in the order in which to display them',
)
return parser
class AcuteAnalyzer(object):
"""
Analyzer.
Given a run_id, we can do lots of fun things!
"""
CHECKBOX_PREFIX = 'checkbox: '
# Prepended to checkbox columns in self.dataframe
def __init__(self, opt: Dict, remove_failed: bool = True):
"""
Initialize the analyzer.
Builds up the dataframe
:param opt:
opt dict
:param remove_failed:
Whether to remove ratings from turkers who failed onboarding
"""
assert ',' not in opt['run_ids'], "AcuteAnalyzer can only handle one run ID!"
self.run_id = opt['run_ids']
self.pairings_filepath = opt['pairings_filepath']
self.outdir = opt['outdir']
self.root_dir = opt['root_dir']
# Get task for loading pairing files
self.task = opt.get('task', 'q')
if opt.get('model_ordering') is not None:
self.custom_model_ordering = opt['model_ordering'].split(',')
else:
self.custom_model_ordering = None
if not self.outdir or not self.pairings_filepath:
# Default to using self.root_dir as the root directory for outputs
assert self.root_dir is not None and os.path.isdir(
self.root_dir
), '--root-dir must be a real directory!'
if not self.pairings_filepath:
# Will be set to a non-empty path later
self.pairings_filepath = ''
if not self.outdir:
self.outdir = os.path.join(self.root_dir, f'{self.run_id}-results')
if not os.path.exists(self.outdir):
os.makedirs(self.outdir, exist_ok=True)
mephisto_root_path = opt['mephisto_root']
if not mephisto_root_path:
mephisto_root_path = None
mephisto_db = LocalMephistoDB(database_path=mephisto_root_path)
self.mephisto_data_browser = MephistoDataBrowser(db=mephisto_db)
self.checkbox_prefix = self.CHECKBOX_PREFIX
# Prepended to checkbox columns in self.dataframe
self.dataframe = self._extract_to_dataframe()
self._check_eval_question()
if remove_failed:
self._remove_failed_onboarding()
if self.dataframe.index.size == 0:
raise ValueError('No valid results found!')
self._get_model_nick_names()
self._load_pairing_files()
def _extract_response_by_index(
self, unit_details: Dict[str, Any], idx: int
) -> Optional[Dict[str, Any]]:
"""
Extract response data from task data.
:param unit_details:
full extracted data from a unit
:param idx:
index of the singular evaluation within unit_details to extract
:return response:
Formatted worker's response data from the task
"""
task_data = unit_details['data'][idx]
response: Dict[str, Any] = {
'run_id': self.run_id,
'worker': unit_details['worker_id'],
'time_taken': unit_details['task_end'] - unit_details['task_start'],
'question': task_data['task_specs']['question'],
'unit_id': unit_details['unit_id'],
'task_start': unit_details['task_start'],
}
onboarding = task_data['task_specs'].get('is_onboarding', False)
if 'speakerChoice' not in task_data or task_data['speakerChoice'] == '':
print('speakerChoice not in task data!')
return
choice = task_data['speakerChoice']
if onboarding:
response['correct'] = choice == task_data['pairing_dict']['correct_answer']
else:
response['correct'] = -1
speakers_to_eval = sorted(task_data["pairing_dict"]["speakers_to_eval"])
response.update(
{
'winner': choice,
'loser': speakers_to_eval[1 - (speakers_to_eval.index(choice))],
'eval_choice_0': speakers_to_eval[0],
'eval_choice_1': speakers_to_eval[1],
'reason': task_data['textReason'],
'is_onboarding': onboarding,
'matchup': f"{'__vs__'.join(speakers_to_eval)}",
'pairing_id': task_data['pair_id'],
}
)
# If it exists, add in which checkboxes of possible reasons the Turkers checked
if len(task_data.get('speakerReasons', {})) > 0:
response.update(
{
self.checkbox_prefix + reason: checked
for reason, checked in task_data['speakerReasons'].items()
}
)
return response
def _parse_unit(self, unit: MephistoUnit) -> Optional[Dict[str, Any]]:
"""
Return data for a given unit.
If the data is corrupt for whatever reason, we return None
:param unit:
MephistoUnit of what should be a completed task by a worker
:return data:
Optional dict with the task's formatted data
"""
try:
return self.mephisto_data_browser.get_data_from_unit(unit)
except AssertionError:
print(
f"WARNING: Data for run_id `{self.run_id}` not found for "
f"unit id {unit.db_id}"
)
return None
def _extract_to_dataframe(self) -> pd.DataFrame:
"""
Extract the data from the run to a pandas dataframe.
"""
units = self.mephisto_data_browser.get_units_for_task_name(self.run_id)
responses: List[Dict[str, Any]] = []
for unit in units:
unit_details = self._parse_unit(unit)
if unit_details is None:
continue
for idx in range(len(unit_details['data'])):
response = self._extract_response_by_index(unit_details, idx)
if response is not None:
responses.append(response)
if len(responses) == 0:
raise ValueError('No valid results found!')
else:
return pd.DataFrame(responses)
def _check_eval_question(self):
"""
Check that the same eval question has been used for all results.
"""
if len(set(self.dataframe['question'].unique())) > 1:
raise ValueError(
'All results must share the same eval question for consistency!'
)
def _remove_failed_onboarding(self):
"""
Remove workers who failed onboarding.
"""
df = self.dataframe
all_workers_failing_onboarding = df.loc[
df['is_onboarding'] & (df['correct'] == False), 'worker' # noqa: E712
].values
workers_failing_onboarding = sorted(
np.unique(all_workers_failing_onboarding).tolist()
)
self.dataframe = df[
~df["worker"].isin(workers_failing_onboarding) & ~df["is_onboarding"]
]
print(
f'{self.dataframe.size:d} dataframe entries remaining after removing users who failed onboarding.'
)
def _load_pairing_files(self):
df = self.dataframe
if not os.path.exists(self.pairings_filepath):
print('No valid pairings filepath was passed in: will extract likely path.')
self.pairings_filepath = get_hashed_combo_path(
root_dir=self.root_dir,
subdir='pairings_files',
task=self.task,
combos=self.combos,
)
if not os.path.exists(self.pairings_filepath):
print(
f'WARNING: Pairings filepath {self.pairings_filepath} could not be found.'
)
self.pairings_filepath = os.path.join(
self.root_dir,
'pairings_files',
hashlib.sha1(
'___vs___'.join(
[f"{m}.{'q'.replace(':', '_')}" for m in self.models]
).encode('utf-8')
).hexdigest()[:10],
)
if not os.path.exists(self.pairings_filepath):
# For backward compatibility
print(
f'WARNING: Pairings filepath {self.pairings_filepath} could not be found.'
)
self.pairings_filepath = os.path.join(
self.root_dir,
'pairings_files',
'___vs___'.join(
[f"{m}.{self.task.replace(':', '_')}" for m in self.models]
),
)
if not os.path.exists(self.pairings_filepath):
print(
f'NOTE: Pairings filepath {self.pairings_filepath} could not be found!'
)
return
self.pairings = []
with open(self.pairings_filepath, 'r') as f:
for line in f:
pair = json.loads(line)
model1, model2 = pair['speakers_to_eval']
pair[model1] = pair['dialogue_dicts'][0]
pair[model2] = pair['dialogue_dicts'][1]
del pair['dialogue_dicts']
self.pairings.append(pair)
self.pairs_to_eval = [self.pairings[i] for i in df.pairing_id.values.tolist()]
# Build dialogue_ids => dialogue mappings
winner_dialogues = []
loser_dialogues = []
for i, (_, row) in enumerate(df.iterrows()):
winner = row['winner']
loser = row['loser']
winner_dialogues.append(self.pairs_to_eval[i][winner])
loser_dialogues.append(self.pairs_to_eval[i][loser])
df['pairs_to_eval'] = pd.Series(self.pairs_to_eval, index=df.index)
df['winner_dialogue'] = pd.Series(winner_dialogues, index=df.index)
df['loser_dialogue'] = pd.Series(loser_dialogues, index=df.index)
self.dataframe = df
def _get_model_nick_names(self):
df = self.dataframe
df = df[df['run_id'] == self.run_id]
matchups = list(df.matchup.unique())
models = set()
combos = set()
for matchup in matchups:
model1, model2 = matchup.split('__vs__')
models.add(model1)
models.add(model2)
combos.add(tuple(sorted((model1, model2))))
self.models = list(models)
self.models.sort()
self.combos = list(combos)
self.combos.sort()
def get_reasons(self) -> List[str]:
"""
Return dataframe reasons.
"""
return self.dataframe['reason'].values.tolist()
def get_max_hits_per_worker(self) -> List[int]:
"""
Get max number of hits per worker.
"""
return self.dataframe.groupby('worker')['run_id'].count().max()
def get_wins_per_model_matchup(self) -> pd.DataFrame:
"""
Return the wins for each model by matchup.
"""
self.matchup_total_df = (
self.dataframe.groupby(['eval_choice_0', 'eval_choice_1'])['run_id']
.count()
.to_frame('matchup_total')
)
self.win_total_df = (
self.dataframe.groupby(
['eval_choice_0', 'eval_choice_1', 'winner', 'loser']
)['loser']
.count()
.to_frame('win_total')
.reset_index()
.set_index(['eval_choice_0', 'eval_choice_1'])
)
return self.win_total_df
def get_win_fractions(self) -> pd.DataFrame:
"""
Return the joined matchup + win totals, get win fractions.
Sorted according to win percentage
"""
if not hasattr(self, 'win_total_df'):
self.get_wins_per_model_matchup()
self.win_fraction_df = self.matchup_total_df.join(self.win_total_df).assign(
win_frac=lambda df: df['win_total'] / df['matchup_total']
)
pivoted_df = self.win_fraction_df.pivot(
index="loser", columns="winner", values="win_frac"
)
if self.custom_model_ordering is not None:
# Use the ordering of the models supplied by the user
assert set(self.custom_model_ordering) == set(pivoted_df.columns)
self.model_ordering = self.custom_model_ordering
else:
self.model_ordering = (
self.win_fraction_df.groupby("winner")["win_frac"]
.mean()
.sort_values()
.index.values.tolist()
)
self.sorted_win_frac_df = pivoted_df.reindex(
index=self.model_ordering, columns=self.model_ordering
)
return self.sorted_win_frac_df
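    @staticmethod
    def _example_win_fraction_pivot():
        """
        Illustrative sketch, not used by the analyzer itself: a tiny
        self-contained version of the pivot performed in get_win_fractions(),
        with made-up model names and counts.
        """
        win_frac = pd.DataFrame(
            [
                {'winner': 'model_a', 'loser': 'model_b', 'win_frac': 2 / 3},
                {'winner': 'model_b', 'loser': 'model_a', 'win_frac': 1 / 3},
            ]
        )
        grid = win_frac.pivot(index='loser', columns='winner', values='win_frac')
        # grid.loc['model_b', 'model_a'] == 2/3, read as "model_a beats
        # model_b two thirds of the time".
        return grid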
def get_num_hits_per_matchup(self):
"""
Return the number of hits per matchup.
"""
matchup_total_1_df = self.matchup_total_df.reset_index()
matchup_total_2_df = matchup_total_1_df.rename(
columns={'eval_choice_0': 'eval_choice_1', 'eval_choice_1': 'eval_choice_0'}
)
self.num_hits_per_matchup_df = (
pd.concat([matchup_total_1_df, matchup_total_2_df], axis=0)
.pivot(
index='eval_choice_0', columns='eval_choice_1', values='matchup_total'
)
.reindex(index=self.model_ordering, columns=self.model_ordering)
)
return self.num_hits_per_matchup_df
def _compile_checkbox_stats(self) -> Dict[str, pd.DataFrame]:
"""
Return the fraction of time that Turkers selected each checkbox.
Results are cut both (1) by matchup and winner and (2) by just the winner. Each
checkbox represents one reason that the Turkers could have chosen the speaker
that they did.
"""
checkbox_columns = [
col
for col in self.dataframe.columns
if col.startswith(self.checkbox_prefix)
]
group_column_types = {
'matchup_and_winner': ['matchup', 'winner'],
'winner': ['winner'],
}
grouped_dataframes = {}
for group_type, group_columns in group_column_types.items():
selected_columns = (
self.dataframe[group_columns + checkbox_columns]
.rename(
columns={
col: col[len(self.checkbox_prefix) :]
for col in checkbox_columns
}
)
.set_index(group_columns)
.fillna(False)
)
grouped_dataframes[group_type] = selected_columns.groupby(
group_columns
).mean()
return grouped_dataframes
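    @staticmethod
    def _example_checkbox_fraction():
        """
        Illustrative sketch, not called anywhere: the kind of aggregation
        _compile_checkbox_stats() performs, shown on a made-up two-row table.
        Averaging the boolean column gives the fraction of ratings in which
        that checkbox was ticked for each winner.
        """
        rows = pd.DataFrame(
            {'winner': ['model_a', 'model_a'], 'more_engaging': [True, False]}
        )
        # -> more_engaging == 0.5 for model_a, i.e. ticked in half the ratings.
        return rows.groupby('winner').mean()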
def _compile_convos_and_reasons(self) -> str:
"""
Create a human-readable string of all pairs of conversations, as well as which
conversation each Turker chose and their reason for choosing it.
"""
pairing_outputs = []
for _, pairing_sr in self.dataframe.iterrows():
winning_dialogue = self._dialogue_to_string(
pairing_sr['winner_dialogue']['dialogue']
)
loser_dialogue = self._dialogue_to_string(
pairing_sr['loser_dialogue']['dialogue']
)
pairing_output = f"""CONVO PAIR ID: {pairing_sr['pairing_id']}
WINNING DIALOGUE: {pairing_sr['winner']}
{winning_dialogue}
LOSING DIALOGUE: {pairing_sr['loser']}
{loser_dialogue}
QUESTION: {pairing_sr['question']}
TURKER'S CHOICE: {pairing_sr['winner']}
REASON: {pairing_sr['reason']}
"""
pairing_outputs.append(pairing_output)
return ''.join(pairing_outputs)
@staticmethod
def _dialogue_to_string(dialogue: List[dict]) -> str:
"""
Convert a list of dictionaries into a human-readable conversation.
Each dictionary represents one utterance.
"""
utterance_strings = []
for utterance_dict in dialogue:
if utterance_dict["id"] == "human_evaluator":
speaker_string = "HUMAN"
else:
speaker_string = utterance_dict["id"]
utterance = utterance_dict["text"]
utterance_strings.append(f"[{speaker_string}]: {utterance}")
return "\n".join(utterance_strings)
def get_matchup_totals_with_significance(self) -> pd.DataFrame:
"""
Return dataframe with matchup win totals + significance.
"""
def _signf_level(p):
if p < 0.001:
return "***", "p<.001"
elif p < 0.01:
return "**", "p<.01"
elif p < 0.05:
return "*", "p<.05"
else:
return "", "p>.05"
output = []
for _, run_annotations in self.dataframe.groupby('run_id'):
question = list(run_annotations.question)[0]
for matchup, annotations in run_annotations.groupby('matchup'):
model1, model2 = matchup.split('__vs__')
wincount1 = np.sum(annotations['winner'] == model1)
wincount2 = np.sum(annotations['winner'] == model2)
numratings = wincount1 + wincount2
winrate1 = np.mean(annotations['winner'] == model1)
winrate2 = np.mean(annotations['winner'] == model2)
p = binom_test([wincount1, wincount2])
stars, plevel = _signf_level(p)
agreements = []
for _, pairing_annotations in annotations.groupby('pairing_id'):
pair_wincount1 = np.sum(pairing_annotations['winner'] == model1)
pair_wincount2 = np.sum(pairing_annotations['winner'] == model2)
if pair_wincount1 < 2 and pair_wincount2 < 2:
if pair_wincount1 == 1 and pair_wincount2 == 1:
agreements.append(0)
else:
majority_wincount = max(pair_wincount1, pair_wincount2)
num_pair_annotations = pair_wincount1 + pair_wincount2
pair_agreement = majority_wincount / num_pair_annotations
agreements.append(pair_agreement)
total_agreement = np.mean(agreements)
output.append(
{
'question': question,
'matchup': matchup,
'model1': model1,
'model2': model2,
'numwins1': wincount1,
'numwins2': wincount2,
'winrate1': winrate1,
'winrate2': winrate2,
'numratings': numratings,
'p': p,
'stars': stars,
'sigf': plevel,
'agree': total_agreement,
}
)
output = pd.DataFrame(output)
# order the columns how we want
self.significance_df = output[
[
'question',
'matchup',
'model1',
'numwins1',
'winrate1',
'model2',
'numwins2',
'winrate2',
'numratings',
'sigf',
'stars',
'p',
'agree',
]
]
return self.significance_df
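    @staticmethod
    def _example_binomial_significance():
        """
        Illustrative sketch, not called anywhere: the significance computation
        used above, on made-up win counts. Under the null hypothesis both
        models are equally likely to win each rating, so the counts follow a
        Binomial(n, 0.5) distribution and binom_test gives a two-sided p-value.
        """
        wins_model1, wins_model2 = 70, 30
        p = binom_test([wins_model1, wins_model2])
        return p < 0.05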
def save_results(self, path: str = None):
"""
Save results to a certain path.
"""
if not hasattr(self, 'significance_df'):
self.get_matchup_totals_with_significance()
if path is None:
path = self.outdir
# Save raw dataframe
self.dataframe.to_csv(f'{path}/{self.run_id}.full.csv', index=False)
with open('{}/{}.significance.csv'.format(path, self.run_id), 'w') as f:
f.write(self.significance_df.to_csv(index=False))
print(
'To visualize significance result, try cat {} | column -t -s, | less -S'.format(
'{}/{}.significance.csv'.format(path, self.run_id)
)
)
with open('{}/{}.grid.csv'.format(path, self.run_id), 'w') as f:
f.write(self.get_win_fractions().to_csv(index=True))
with open(f'{path}/{self.run_id}.grid.winners_as_rows.csv', 'w') as f:
f.write(self.get_win_fractions().transpose().to_csv(index=True))
print(
'To visualize grid result, try cat {} | column -t -s, | less -S'.format(
'{}/{}.grid.csv'.format(path, self.run_id)
)
)
# Save stats on how many ratings each worker did
ratings_per_worker = (
self.dataframe.groupby('worker')['run_id']
.count()
.sort_values(ascending=False)
)
ratings_per_worker.to_csv(f'{path}/{self.run_id}.ratings_per_worker.csv')
# Save stats on how often Turkers selected each checkbox that represents one
# reason to pick the speaker they did
if any(col.startswith(self.checkbox_prefix) for col in self.dataframe.columns):
checkbox_stats_dataframes = self._compile_checkbox_stats()
for group_type, stats in checkbox_stats_dataframes.items():
stats.to_csv(f'{path}/{self.run_id}.checkbox_stats.{group_type}.csv')
if not hasattr(self, 'pairings'):
print('No pairing file found, skipping conversation visualizations.')
else:
with open('{}/{}.reason.html'.format(path, self.run_id), 'w') as f:
f.write(render_conversations_per_matchups(self.dataframe, True).data)
print(
'To visualize conversations with reasons only result, '
'try scp username@devfair:{} to your local machine'.format(
' {}/{}.reason.html'.format(path, self.run_id)
)
)
with open('{}/{}.all.html'.format(path, self.run_id), 'w') as f:
f.write(render_conversations_per_matchups(self.dataframe, False).data)
print(
'To visualize conversations result, try scp username@devfair:{}'
' to your local machine'.format(
'{}/{}.all.html'.format(path, self.run_id)
)
)
# Write all pairs of dialogues, as well as the Turkers' choices and reasons, as
# a text file
compiled_text = self._compile_convos_and_reasons()
with open(f'{path}/{self.run_id}.all_convo_pairs.txt', 'w') as f:
f.write(compiled_text)
class MultiRunAcuteAnalyzer(AcuteAnalyzer):
"""
Combine results from different ACUTE-Eval runs.
"""
def __init__(self, opt: Dict, dataframes: Dict[str, pd.DataFrame]):
"""
Read in and combine the dataframes of other already-analyzed ACUTE-Eval runs.
"""
self.outdir = opt['outdir']
if opt.get('model_ordering') is not None:
self.custom_model_ordering = opt['model_ordering'].split(',')
else:
self.custom_model_ordering = None
self.run_id = 'combined'
self.checkbox_prefix = self.CHECKBOX_PREFIX
# Prepended to checkbox columns in self.dataframe
for dataframe in dataframes.values():
dataframe.loc[:, 'run_id'] = self.run_id
# Overwrite the run_id so that results will combine across runs
self.dataframe = pd.concat(dataframes.values(), axis=0)
# Check that all results across all runs share the same eval question
self._check_eval_question()
def get_multi_run_analyzer(opt) -> MultiRunAcuteAnalyzer:
"""
Return an object to analyze the results of multiple runs simultaneously.
Load HITs from each run into a separate dataframe, and then pass all dataframes into
a separate analyzer class that will concatenate them.
"""
run_ids = opt['run_ids'].split(',')
# Define paths
assert (
opt['outdir'] is not None
), '--outdir must be specified when combining results of multiple runs!'
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
opt['outdir'] = os.path.join(opt['outdir'], f'combined_runs_{timestamp}')
os.makedirs(opt['outdir'], exist_ok=True)
run_id_list_path = os.path.join(opt['outdir'], 'run_ids.txt')
# Save a simple list of all run IDs stitched together
with open(run_id_list_path, 'w') as f:
for run_id in run_ids:
f.write(run_id + '\n')
# Loop loading HITs over all run ids into dataframes
dataframes = {}
for run_id in run_ids:
print(f'\nStarting to load HITs for run ID {run_id}.')
opt_copy = deepcopy(opt)
opt_copy['run_ids'] = run_id
dataframes[run_id] = AcuteAnalyzer(opt_copy).dataframe
return MultiRunAcuteAnalyzer(opt=opt, dataframes=dataframes)
def render_row(row):
result = []
for i, turn in enumerate(row['winner_dialogue']['dialogue']):
speakername = turn['id']
text = turn['text']
is_bot = (speakername != 'human_evaluator') and (speakername != 'other_speaker')
if i > 2 and is_bot:
speakername = 'bot'
align = 'right' if is_bot else 'left'
color = "white" if is_bot else "black"
bgcolor = '#2391f7' if is_bot else '#e1e1e7'
result.append(
(
'<div style="overflow: auto; padding: 1ex 0;">'
'<div style="clear: both; float: {}; color: {}; background-color: {}; padding: 0.5em 1em; border-radius: 1em; max-width: 80%">'
'<p style="margin: 0">{}: {}</p>'
'</div>'
'</div>'
).format(align, color, bgcolor, speakername, text)
)
winner_dialogue = (
'<div style="background-color: white; margin: 0em; padding: 0.5em; '
'font-family: sans-serif; font-size: 9pt; width: 99%;">'
+ ''.join(result)
+ '</div>'
)
result = []
for i, turn in enumerate(row['loser_dialogue']['dialogue']):
speakername = turn['id']
is_bot = (speakername != 'human_evaluator') and (speakername != 'other_speaker')
if i > 2 and is_bot:
speakername = 'bot'
text = turn['text']
align = 'right' if is_bot else 'left'
color = "white" if is_bot else "black"
bgcolor = '#2391f7' if is_bot else '#e1e1e7'
result.append(
(
'<div style="overflow: auto; padding: 1ex 0;">'
'<div style="clear: both; float: {}; color: {}; background-color: {}; padding: 0.5em 1em; border-radius: 1em; max-width: 80%">'
'<p style="margin: 0">{}: {}</p>'
'</div>'
'</div>'
).format(align, color, bgcolor, speakername, text)
)
loser_dialogue = (
'<div style="background-color: white; margin: 0em; padding: 0.5em; '
'font-family: sans-serif; font-size: 9pt; width: 99%;">'
+ ''.join(result)
+ '</div>'
)
return HTML(
'<tr><td>{}</td><td>{}</td><td>{}</td></tr>'.format(
winner_dialogue, loser_dialogue, row['reason']
)
)
def render_many_conversations(table):
return HTML(
'<table><tr><th>Winner Conversation</th><th>Loser Conversation</th><th>Reason</th></tr>{}</table>'.format(
''.join(render_row(row).data for i, row in table.iterrows())
)
)
def render_conversations_per_matchups(table, force_reasons=True):
matchups = list(table.matchup.unique())
result = ''
if force_reasons:
table = table[table['reason'] != '']
for matchup in matchups:
length = min(10, len(table[table['matchup'] == matchup]))
result += '<h2>{}</h2><body>{}</body>'.format(
matchup,
render_many_conversations(table[table['matchup'] == matchup][:length]).data,
)
return HTML(result)
if __name__ == "__main__":
parser = setup_args()
opt_ = parser.parse_args()
if ',' not in opt_['run_ids']:
analyzer = AcuteAnalyzer(opt_)
else:
analyzer = get_multi_run_analyzer(opt_)
analyzer.save_results()
# Print win fractions
results = pd.DataFrame(analyzer.get_win_fractions())
print(results.round(2).to_string())
# Print matchup totals with significance
result_ = pd.DataFrame(analyzer.get_matchup_totals_with_significance())
result_ = result_.drop(columns=['matchup', 'agree'])
print(result_.round(2).to_string())
| mit |
danny200309/BuildingMachineLearningSystemsWithPython | ch12/image-classification.py | 21 | 3109 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import mahotas as mh
import numpy as np
from glob import glob
from jug import TaskGenerator
# We need to use the `features` module from chapter 10.
from sys import path
path.append('../ch10')
# This is the jug-enabled version of the script ``figure18.py`` in Chapter 10
basedir = '../SimpleImageDataset/'
@TaskGenerator
def compute_texture(im):
'''Compute features for an image
Parameters
----------
im : str
filepath for image to process
Returns
-------
fs : ndarray
1-D array of features
'''
from features import texture
imc = mh.imread(im)
return texture(mh.colors.rgb2grey(imc))
@TaskGenerator
def chist(fname):
from features import color_histogram
im = mh.imread(fname)
return color_histogram(im)
@TaskGenerator
def compute_lbp(fname):
from mahotas.features import lbp
imc = mh.imread(fname)
im = mh.colors.rgb2grey(imc)
return lbp(im, radius=8, points=6)
@TaskGenerator
def accuracy(features, labels):
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn import cross_validation
# We use logistic regression because it is very fast.
# Feel free to experiment with other classifiers
clf = Pipeline([('preproc', StandardScaler()),
('classifier', LogisticRegression())])
cv = cross_validation.LeaveOneOut(len(features))
scores = cross_validation.cross_val_score(
clf, features, labels, cv=cv)
return scores.mean()
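def accuracy_with_other_classifier(features, labels):
    '''Illustrative sketch, not wired into the jug pipeline above: as the
    comment in accuracy() suggests, trying another classifier only means
    swapping the second pipeline step, e.g. for a random forest.
    '''
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler
    from sklearn import cross_validation
    clf = Pipeline([('preproc', StandardScaler()),
                    ('classifier', RandomForestClassifier(n_estimators=100))])
    cv = cross_validation.LeaveOneOut(len(features))
    scores = cross_validation.cross_val_score(clf, features, labels, cv=cv)
    return scores.mean()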
@TaskGenerator
def print_results(scores):
with open('results.image.txt', 'w') as output:
for k,v in scores:
output.write('Accuracy (LOO x-val) with Logistic Regression [{0}]: {1:.1%}\n'.format(
k, v.mean()))
to_array = TaskGenerator(np.array)
hstack = TaskGenerator(np.hstack)
haralicks = []
chists = []
lbps = []
labels = []
# Use glob to get all the images
images = glob('{0}/*.jpg'.format(basedir))
for fname in sorted(images):
haralicks.append(compute_texture(fname))
chists.append(chist(fname))
lbps.append(compute_lbp(fname))
labels.append(fname[:-len('00.jpg')]) # The class is encoded in the filename as xxxx00.jpg
haralicks = to_array(haralicks)
chists = to_array(chists)
lbps = to_array(lbps)
labels = to_array(labels)
scores_base = accuracy(haralicks, labels)
scores_chist = accuracy(chists, labels)
scores_lbps = accuracy(lbps, labels)
combined = hstack([chists, haralicks])
scores_combined = accuracy(combined, labels)
combined_all = hstack([chists, haralicks, lbps])
scores_combined_all = accuracy(combined_all, labels)
print_results([
('base', scores_base),
('chists', scores_chist),
('lbps', scores_lbps),
('combined' , scores_combined),
('combined_all' , scores_combined_all),
])
| mit |
codewatchorg/droidboxhelper | droidbox.py | 1 | 15436 | ################################################################################
# (c) 2011, The Honeynet Project
# Author: Patrik Lantz [email protected] and Laurent Delosieres [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
################################################################################
"""Analyze dynamically Android applications
This script allows you to analyze dynamically Android applications. It installs, runs, and analyzes Android applications.
At the end of each analysis, it outputs the Android application's characteristics in JSON.
Please keep in mind that all data received/sent, read/written are shown in hexadecimal since the handled data can contain binary data.
"""
import sys, json, time, curses, signal, os, inspect
import zipfile, StringIO
import tempfile, shutil
import operator
import subprocess
import thread, threading
import re
from threading import Thread
from xml.dom import minidom
from subprocess import call, PIPE, Popen
from utils import AXMLPrinter
import hashlib
from pylab import *
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.font_manager import FontProperties
from collections import OrderedDict
sendsms = {}
phonecalls = {}
cryptousage = {}
dexclass = {}
dataleaks = {}
opennet = {}
sendnet = {}
recvnet = {}
closenet = {}
fdaccess = {}
servicestart = {}
accessedfiles = {}
tags = { 0x1 : "TAINT_LOCATION", 0x2: "TAINT_CONTACTS", 0x4: "TAINT_MIC", 0x8: "TAINT_PHONE_NUMBER",
0x10: "TAINT_LOCATION_GPS", 0x20: "TAINT_LOCATION_NET", 0x40: "TAINT_LOCATION_LAST", 0x80: "TAINT_CAMERA",
0x100: "TAINT_ACCELEROMETER", 0x200: "TAINT_SMS", 0x400: "TAINT_IMEI", 0x800: "TAINT_IMSI",
0x1000: "TAINT_ICCID", 0x2000: "TAINT_DEVICE_SN", 0x4000: "TAINT_ACCOUNT", 0x8000: "TAINT_BROWSER",
0x10000: "TAINT_OTHERDB", 0x20000: "TAINT_FILECONTENT", 0x40000: "TAINT_PACKAGE", 0x80000: "TAINT_CALL_LOG",
0x100000: "TAINT_EMAIL", 0x200000: "TAINT_CALENDAR", 0x400000: "TAINT_SETTINGS" }
class CountingThread(Thread):
"""
Used for user interface, showing in progress sign
and number of collected logs from the sandbox system
"""
def __init__ (self):
"""
Constructor
"""
Thread.__init__(self)
self.stop = False
self.logs = 0
def stopCounting(self):
"""
Mark to stop this thread
"""
self.stop = True
def increaseCount(self):
self.logs = self.logs + 1
def run(self):
"""
Update the progress sign and
number of collected logs
"""
signs = ['|', '/', '-', '\\']
counter = 0
while 1:
sign = signs[counter % len(signs)]
sys.stdout.write(" \033[132m[%s] Collected %s sandbox logs\033[1m (Ctrl-C to view logs)\r" % (sign, str(self.logs)))
sys.stdout.flush()
time.sleep(0.5)
counter = counter + 1
if self.stop:
sys.stdout.write(" \033[132m[%s] Collected %s sandbox logs\033[1m%s\r" % ('*', str(self.logs), ' '*25))
sys.stdout.flush()
break
class Application:
"""
Used for extracting information of an Android APK
"""
def __init__(self, filename):
self.filename = filename
self.packageNames = []
self.enfperm = []
self.permissions = []
self.recvs = []
self.activities = {}
self.recvsaction = {}
self.mainActivity = None
def processAPK(self):
xml = {}
error = True
try:
zip = zipfile.ZipFile(self.filename)
for i in zip.namelist() :
if i == "AndroidManifest.xml" :
try :
xml[i] = minidom.parseString( zip.read( i ) )
except :
xml[i] = minidom.parseString( AXMLPrinter( zip.read( i ) ).getBuff() )
for item in xml[i].getElementsByTagName('manifest'):
self.packageNames.append( str( item.getAttribute("package") ) )
for item in xml[i].getElementsByTagName('permission'):
self.enfperm.append( str( item.getAttribute("android:name") ) )
for item in xml[i].getElementsByTagName('uses-permission'):
self.permissions.append( str( item.getAttribute("android:name") ) )
for item in xml[i].getElementsByTagName('receiver'):
self.recvs.append( str( item.getAttribute("android:name") ) )
for child in item.getElementsByTagName('action'):
self.recvsaction[str( item.getAttribute("android:name") )] = (str( child.getAttribute("android:name") ))
for item in xml[i].getElementsByTagName('activity'):
activity = str( item.getAttribute("android:name") )
self.activities[activity] = {}
self.activities[activity]["actions"] = list()
for child in item.getElementsByTagName('action'):
self.activities[activity]["actions"].append(str(child.getAttribute("android:name")))
for activity in self.activities:
for action in self.activities[activity]["actions"]:
if action == 'android.intent.action.MAIN':
self.mainActivity = activity
error = False
break
if (error == False):
return 1
else:
return 0
except:
return 0
def getEnfperm(self):
return self.enfperm
def getRecvsaction(self):
return self.recvsaction
def getMainActivity(self):
return self.mainActivity
def getActivities(self):
return self.activities
def getRecvActions(self):
return self.recvsaction
def getPackage(self):
#One application has only one package name
return self.packageNames[0]
def getHashes(self, block_size=2**8):
"""
Calculate MD5,SHA-1, SHA-256
hashes of APK input file
"""
md5 = hashlib.md5()
sha1 = hashlib.sha1()
sha256 = hashlib.sha256()
f = open(self.filename, 'rb')
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
sha1.update(data)
sha256.update(data)
return [md5.hexdigest(), sha1.hexdigest(), sha256.hexdigest()]
def decode(s, encodings=('ascii', 'utf8', 'latin1')):
for encoding in encodings:
try:
return s.decode(encoding)
except UnicodeDecodeError:
pass
return s.decode('ascii', 'ignore')
def getTags(tagParam):
"""
Retrieve the tag names
"""
tagsFound = []
for tag in tags.keys():
if tagParam & tag != 0:
tagsFound.append(tags[tag])
return tagsFound
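def exampleGetTags():
    """
    Illustrative sketch, not used by the analysis itself: the taint tag
    reported by DroidBox is a bitmask, so a combined value decodes to every
    matching name. 0x200 | 0x400 covers both the SMS and the IMEI taints.
    """
    return getTags(0x200 | 0x400) # contains 'TAINT_SMS' and 'TAINT_IMEI'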
def hexToStr(hexStr):
"""
Convert a string hex byte values into a byte string
"""
bytes = []
hexStr = ''.join(hexStr.split(" "))
for i in range(0, len(hexStr), 2):
bytes.append(chr(int(hexStr[i:i+2], 16)))
return unicode(''.join( bytes ), errors='replace')
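def exampleHexToStr():
    """
    Illustrative sketch, not used by the analysis itself: DroidBox logs paths
    and payloads as space-separated hex bytes, which hexToStr turns back into
    text.
    """
    return hexToStr("68 65 6c 6c 6f") # -> u'hello'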
def interruptHandler(signum, frame):
"""
Raise interrupt for the blocking call 'logcatInput = sys.stdin.readline()'
"""
raise KeyboardInterrupt
def main(argv):
if len(argv) < 2 or len(argv) > 3:
print("Usage: droidbox.py filename.apk <duration in seconds>")
sys.exit(1)
duration = 0
#Duration given?
if len(argv) == 3:
duration = int(argv[2])
apkName = sys.argv[1]
#APK existing?
if os.path.isfile(apkName) == False:
print("File %s not found" % argv[1])
sys.exit(1)
application = Application(apkName)
ret = application.processAPK()
#Error during the APK processing?
if (ret == 0):
print("Failed to analyze the APK. Terminate the analysis.")
sys.exit(1)
activities = application.getActivities()
mainActivity = application.getMainActivity()
packageName = application.getPackage()
recvsaction = application.getRecvsaction()
enfperm = application.getEnfperm()
#Get the hashes
hashes = application.getHashes()
curses.setupterm()
sys.stdout.write(curses.tigetstr("clear"))
sys.stdout.flush()
call(['adb', 'logcat', '-c'])
print " ____ __ ____"
print "/\ _`\ __ /\ \/\ _`\\"
print "\ \ \/\ \ _ __ ___ /\_\ \_\ \ \ \L\ \ ___ __ _"
print " \ \ \ \ \/\`'__\ __`\/\ \ /'_` \ \ _ <' / __`\/\ \/'\\"
print " \ \ \_\ \ \ \/\ \L\ \ \ \/\ \L\ \ \ \L\ \\ \L\ \/> </"
print " \ \____/\ \_\ \____/\ \_\ \___,_\ \____/ \____//\_/\_\\"
print " \/___/ \/_/\/___/ \/_/\/__,_ /\/___/ \/___/ \//\/_/"
#No Main acitvity found? Return an error
if mainActivity == None:
print("No activity to start. Terminate the analysis.")
sys.exit(1)
#No packages identified? Return an error
if packageName == None:
print("No package found. Terminate the analysis.")
sys.exit(1)
#Execute the application
ret = call(['monkeyrunner', 'monkeyrunner.py', apkName, packageName, mainActivity], stderr=PIPE, cwd=os.path.dirname(os.path.realpath(__file__)))
if (ret == 1):
print("Failed to execute the application.")
sys.exit(1)
print("Starting the activity %s..." % mainActivity)
#By default the application has not started
applicationStarted = 0
stringApplicationStarted = "Start proc %s" % packageName
#Open the adb logcat
adb = Popen(["adb", "logcat", "DroidBox:W", "dalvikvm:W", "ActivityManager:I"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
#Wait for the application to start
while 1:
try:
logcatInput = adb.stdout.readline()
if not logcatInput:
raise Exception("We have lost the connection with ADB.")
#Application started?
if (stringApplicationStarted in logcatInput):
applicationStarted = 1
break;
except:
break
if (applicationStarted == 0):
print("Analysis has not been done.")
#Kill ADB, otherwise it will never terminate
os.kill(adb.pid, signal.SIGTERM)
sys.exit(1)
print("Application started")
print("Analyzing the application during %s seconds..." % (duration if (duration !=0) else "infinite time"))
count = CountingThread()
count.start()
timeStamp = time.time()
if duration:
signal.signal(signal.SIGALRM, interruptHandler)
signal.alarm(duration)
#Collect DroidBox logs
while 1:
try:
logcatInput = adb.stdout.readline()
if not logcatInput:
raise Exception("We have lost the connection with ADB.")
boxlog = logcatInput.split('DroidBox:')
if len(boxlog) > 1:
try:
load = json.loads(decode(boxlog[1]))
# DexClassLoader
if load.has_key('DexClassLoader'):
load['DexClassLoader']['type'] = 'dexload'
dexclass[time.time() - timeStamp] = load['DexClassLoader']
count.increaseCount()
# service started
if load.has_key('ServiceStart'):
load['ServiceStart']['type'] = 'service'
servicestart[time.time() - timeStamp] = load['ServiceStart']
count.increaseCount()
# received data from net
if load.has_key('RecvNet'):
host = load['RecvNet']['srchost']
port = load['RecvNet']['srcport']
recvnet[time.time() - timeStamp] = recvdata = {'type': 'net read', 'host': host, 'port': port, 'data': load['RecvNet']['data']}
count.increaseCount()
# fdaccess
if load.has_key('FdAccess'):
accessedfiles[load['FdAccess']['id']] = hexToStr(load['FdAccess']['path'])
# file read or write
if load.has_key('FileRW'):
load['FileRW']['path'] = accessedfiles[load['FileRW']['id']]
if load['FileRW']['operation'] == 'write':
load['FileRW']['type'] = 'file write'
else:
load['FileRW']['type'] = 'file read'
fdaccess[time.time()-timeStamp] = load['FileRW']
count.increaseCount()
# opened network connection log
if load.has_key('OpenNet'):
opennet[time.time()-timeStamp] = load['OpenNet']
count.increaseCount()
# closed socket
if load.has_key('CloseNet'):
closenet[time.time()-timeStamp] = load['CloseNet']
count.increaseCount()
# outgoing network activity log
if load.has_key('SendNet'):
load['SendNet']['type'] = 'net write'
sendnet[time.time()-timeStamp] = load['SendNet']
count.increaseCount()
# data leak log
if load.has_key('DataLeak'):
my_time = time.time()-timeStamp
load['DataLeak']['type'] = 'leak'
load['DataLeak']['tag'] = getTags(int(load['DataLeak']['tag'], 16))
dataleaks[my_time] = load['DataLeak']
count.increaseCount()
if load['DataLeak']['sink'] == 'Network':
load['DataLeak']['type'] = 'net write'
sendnet[my_time] = load['DataLeak']
count.increaseCount()
elif load['DataLeak']['sink'] == 'File':
load['DataLeak']['path'] = accessedfiles[load['DataLeak']['id']]
if load['DataLeak']['operation'] == 'write':
load['DataLeak']['type'] = 'file write'
else:
load['DataLeak']['type'] = 'file read'
fdaccess[my_time] = load['DataLeak']
count.increaseCount()
elif load['DataLeak']['sink'] == 'SMS':
load['DataLeak']['type'] = 'sms'
sendsms[my_time] = load['DataLeak']
count.increaseCount()
# sent sms log
if load.has_key('SendSMS'):
load['SendSMS']['type'] = 'sms'
sendsms[time.time()-timeStamp] = load['SendSMS']
count.increaseCount()
# phone call log
if load.has_key('PhoneCall'):
load['PhoneCall']['type'] = 'call'
phonecalls[time.time()-timeStamp] = load['PhoneCall']
count.increaseCount()
# crypto api usage log
if load.has_key('CryptoUsage'):
load['CryptoUsage']['type'] = 'crypto'
cryptousage[time.time()-timeStamp] = load['CryptoUsage']
count.increaseCount()
except ValueError:
pass
except:
try:
count.stopCounting()
count.join()
finally:
break;
#Kill ADB, otherwise it will never terminate
os.kill(adb.pid, signal.SIGTERM)
#Done? Store the objects in a dictionary, transform it in a JSON object and return it
output = dict()
#Sort the items by their key
output["dexclass"] = dexclass
output["servicestart"] = servicestart
output["recvnet"] = recvnet
output["opennet"] = opennet
output["sendnet"] = sendnet
output["closenet"] = closenet
output["accessedfiles"] = accessedfiles
output["dataleaks"] = dataleaks
output["fdaccess"] = fdaccess
output["sendsms"] = sendsms
output["phonecalls"] = phonecalls
output["cryptousage"] = cryptousage
output["recvsaction"] = recvsaction
output["enfperm"] = enfperm
output["hashes"] = hashes
output["apkName"] = apkName
print(json.dumps(output))
jsonhash = hashlib.sha1(json.dumps(output)).hexdigest()+'.json'
print 'Saving JSON data to file: '+jsonhash+'\n'
droidLog = open(jsonhash, 'w')
droidLog.write(json.dumps(output))
droidLog.close()
sys.exit(0)
if __name__ == "__main__":
main(sys.argv)
| unlicense |
Titan-C/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 5 | 7177 | import numpy as np
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.linear_model import LogisticRegression, SGDClassifier, Lasso
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
iris = datasets.load_iris()
data, y = iris.data, iris.target
rng = np.random.RandomState(0)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
for threshold in ["gobbledigook", ".5 * gobbledigook"]:
model = SelectFromModel(clf, threshold=threshold)
model.fit(data, y)
assert_raises(ValueError, model.transform, data)
def test_input_estimator_unchanged():
"""
Test that SelectFromModel fits on a clone of the estimator.
"""
est = RandomForestClassifier()
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
assert_true(transformer.estimator is est)
@skip_if_32bit
def test_feature_importances():
X, y = datasets.make_classification(
n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False, random_state=0)
est = RandomForestClassifier(n_estimators=50, random_state=0)
for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
transformer = SelectFromModel(estimator=est, threshold=threshold)
transformer.fit(X, y)
assert_true(hasattr(transformer.estimator_, 'feature_importances_'))
X_new = transformer.transform(X)
assert_less(X_new.shape[1], X.shape[1])
importances = transformer.estimator_.feature_importances_
feature_mask = np.abs(importances) > func(importances)
assert_array_almost_equal(X_new, X[:, feature_mask])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
est = RandomForestClassifier(n_estimators=50, random_state=0)
transformer = SelectFromModel(estimator=est)
transformer.fit(X, y, sample_weight=sample_weight)
importances = transformer.estimator_.feature_importances_
transformer.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = transformer.estimator_.feature_importances_
assert_almost_equal(importances, importances_bis)
# For the Lasso and related models, the threshold defaults to 1e-5
transformer = SelectFromModel(estimator=Lasso(alpha=0.1))
transformer.fit(X, y)
X_new = transformer.transform(X)
mask = np.abs(transformer.estimator_.coef_) > 1e-5
assert_array_equal(X_new, X[:, mask])
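def _example_mean_threshold_arithmetic():
    # Illustrative sketch (not collected by the test runner): the masks built
    # in the tests above keep the features whose importance exceeds the chosen
    # statistic, e.g. with a "mean" threshold:
    importances = np.array([0.05, 0.40, 0.05, 0.50])
    mask = importances > np.mean(importances)  # mean == 0.25
    assert mask.tolist() == [False, True, False, True]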
@skip_if_32bit
def test_feature_importances_2d_coef():
X, y = datasets.make_classification(
n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False, random_state=0, n_classes=4)
est = LogisticRegression()
for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
for order in [1, 2, np.inf]:
# Fit SelectFromModel a multi-class problem
transformer = SelectFromModel(estimator=LogisticRegression(),
threshold=threshold,
norm_order=order)
transformer.fit(X, y)
assert_true(hasattr(transformer.estimator_, 'coef_'))
X_new = transformer.transform(X)
assert_less(X_new.shape[1], X.shape[1])
# Manually check that the norm is correctly performed
est.fit(X, y)
importances = np.linalg.norm(est.coef_, axis=0, ord=order)
feature_mask = importances > func(importances)
assert_array_equal(X_new, X[:, feature_mask])
def test_partial_fit():
est = PassiveAggressiveClassifier(random_state=0, shuffle=False)
transformer = SelectFromModel(estimator=est)
transformer.partial_fit(data, y,
classes=np.unique(y))
old_model = transformer.estimator_
transformer.partial_fit(data, y,
classes=np.unique(y))
new_model = transformer.estimator_
assert_true(old_model is new_model)
X_transform = transformer.transform(data)
transformer.fit(np.vstack((data, data)), np.concatenate((y, y)))
assert_array_equal(X_transform, transformer.transform(data))
# check that if est doesn't have partial_fit, neither does SelectFromModel
transformer = SelectFromModel(estimator=RandomForestClassifier())
assert_false(hasattr(transformer, "partial_fit"))
def test_calling_fit_reinitializes():
est = LinearSVC(random_state=0)
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
transformer.set_params(estimator__C=100)
transformer.fit(data, y)
assert_equal(transformer.estimator_.C, 100)
def test_prefit():
"""
Test all possible combinations of the prefit parameter.
"""
# Passing a prefit parameter with the selected model
    # and fitting an unfit model with prefit=False should give the same results.
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
model = SelectFromModel(clf)
model.fit(data, y)
X_transform = model.transform(data)
clf.fit(data, y)
model = SelectFromModel(clf, prefit=True)
assert_array_equal(model.transform(data), X_transform)
# Check that the model is rewritten if prefit=False and a fitted model is
# passed
model = SelectFromModel(clf, prefit=False)
model.fit(data, y)
assert_array_equal(model.transform(data), X_transform)
# Check that prefit=True and calling fit raises a ValueError
model = SelectFromModel(clf, prefit=True)
assert_raises(ValueError, model.fit, data, y)
def test_threshold_string():
est = RandomForestClassifier(n_estimators=50, random_state=0)
model = SelectFromModel(est, threshold="0.5*mean")
model.fit(data, y)
X_transform = model.transform(data)
# Calculate the threshold from the estimator directly.
est.fit(data, y)
threshold = 0.5 * np.mean(est.feature_importances_)
mask = est.feature_importances_ > threshold
assert_array_equal(X_transform, data[:, mask])
def test_threshold_without_refitting():
"""Test that the threshold can be set without refitting the model."""
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
model = SelectFromModel(clf, threshold="0.1 * mean")
model.fit(data, y)
X_transform = model.transform(data)
# Set a higher threshold to filter out more features.
model.threshold = "1.0 * mean"
assert_greater(X_transform.shape[1], model.transform(data).shape[1])
| bsd-3-clause |
akshaybabloo/Car-ND | Term_1/advanced_lane_finding_10/color_space_10_8.py | 1 | 2835 | """
HLS and Color Threshold
-----------------------
You've now seen that various color thresholds can be applied to find the lane lines in images. Here we'll explore
this a bit further and look at a couple examples to see why a color space like HLS can be more robust.
"""
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
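def hls_of_pixel(r, g, b):
    """
    Illustrative sketch, not called by run() below: convert a single RGB pixel
    to HLS. A brightly lit and a shadowed patch of the same lane-line color
    differ a lot in R/G/B and lightness but keep a similar, high saturation,
    which is why the saturation threshold below tends to be more robust.
    """
    pixel = np.uint8([[[r, g, b]]]) # 1x1 RGB image
    h, l, s = cv2.cvtColor(pixel, cv2.COLOR_RGB2HLS)[0, 0]
    return h, l, s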
def run():
"""
    Load a test image and compare its grayscale, RGB and HLS channels and their thresholds.
"""
image = mpimg.imread('test6.jpg')
# Converting original to gray
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Threshold for original image
thresh = (180, 255)
binary = np.zeros_like(gray)
binary[(gray > thresh[0]) & (gray <= thresh[1])] = 1
red = image[:, :, 0]
green = image[:, :, 1]
blue = image[:, :, 2]
thresh_2 = (200, 255)
binary_2 = np.zeros_like(red)
binary_2[(red > thresh_2[0]) & (red <= thresh_2[1])] = 1
# Converting image to HLS
hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    # Splitting HLS channels
hue = hls[:, :, 0]
lightness = hls[:, :, 1]
saturation = hls[:, :, 2]
# Threshold for saturation
thresh_3 = (90, 255)
binary_3 = np.zeros_like(saturation)
binary_3[(saturation > thresh_3[0]) & (saturation <= thresh_3[1])] = 1
# Threshold for Hue
thresh_4 = (15, 100)
binary_4 = np.zeros_like(hue)
binary_4[(hue > thresh_4[0]) & (hue <= thresh_4[1])] = 1
# -------------------- Figure -----------------------
f = plt.figure()
size_x, size_y = (4, 4)
f.add_subplot(size_x, size_y, 1)
plt.imshow(image)
plt.title("Original")
f.add_subplot(size_x, size_y, 2)
plt.imshow(gray, cmap='gray')
plt.title("Gray")
f.add_subplot(size_x, size_y, 3)
plt.imshow(binary, cmap='gray')
plt.title("Threshold of ({}, {})".format(thresh[0], thresh[1]))
f.add_subplot(size_x, size_y, 4)
plt.imshow(red, cmap='gray')
plt.title("Red")
f.add_subplot(size_x, size_y, 5)
plt.imshow(green, cmap='gray')
plt.title("Green")
f.add_subplot(size_x, size_y, 6)
plt.imshow(blue, cmap='gray')
plt.title("Blue")
f.add_subplot(size_x, size_y, 7)
plt.imshow(binary_2, cmap='gray')
plt.title("Threshold of Red color")
f.add_subplot(size_x, size_y, 8)
plt.imshow(hue, cmap='gray')
plt.title("Hue")
f.add_subplot(size_x, size_y, 9)
plt.imshow(lightness, cmap='gray')
plt.title("Lightness")
f.add_subplot(size_x, size_y, 10)
plt.imshow(saturation, cmap='gray')
plt.title("Saturation")
f.add_subplot(size_x, size_y, 11)
plt.imshow(binary_3, cmap='gray')
plt.title("Threshold of saturation")
f.add_subplot(size_x, size_y, 12)
plt.imshow(binary_4, cmap='gray')
plt.title("Threshold of hue")
plt.show()
if __name__ == '__main__':
run()
| mit |
cpcloud/dask | dask/utils.py | 1 | 30449 | from __future__ import absolute_import, division, print_function
import codecs
import functools
import inspect
import io
import math
import os
import re
import shutil
import struct
import sys
import tempfile
from errno import ENOENT
from collections import Iterator
from contextlib import contextmanager
from importlib import import_module
from threading import Lock
import multiprocessing as mp
import uuid
from weakref import WeakValueDictionary
from .compatibility import (long, getargspec, BZ2File, GzipFile, LZMAFile, PY3,
urlsplit, unicode)
from .core import get_deps
from .context import _globals
from .optimize import key_split # noqa: F401
system_encoding = sys.getdefaultencoding()
if system_encoding == 'ascii':
system_encoding = 'utf-8'
def deepmap(func, *seqs):
""" Apply function inside nested lists
>>> inc = lambda x: x + 1
>>> deepmap(inc, [[1, 2], [3, 4]])
[[2, 3], [4, 5]]
>>> add = lambda x, y: x + y
>>> deepmap(add, [[1, 2], [3, 4]], [[10, 20], [30, 40]])
[[11, 22], [33, 44]]
"""
if isinstance(seqs[0], (list, Iterator)):
return [deepmap(func, *items) for items in zip(*seqs)]
else:
return func(*seqs)
def homogeneous_deepmap(func, seq):
if not seq:
return seq
n = 0
tmp = seq
while isinstance(tmp, list):
n += 1
tmp = tmp[0]
return ndeepmap(n, func, seq)
def ndeepmap(n, func, seq):
""" Call a function on every element within a nested container
>>> def inc(x):
... return x + 1
>>> L = [[1, 2], [3, 4, 5]]
>>> ndeepmap(2, inc, L)
[[2, 3], [4, 5, 6]]
"""
if n == 1:
return [func(item) for item in seq]
elif n > 1:
return [ndeepmap(n - 1, func, item) for item in seq]
elif isinstance(seq, list):
return func(seq[0])
else:
return func(seq)
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
def import_required(mod_name, error_msg):
"""Attempt to import a required dependency.
Raises a RuntimeError if the requested module is not available.
"""
try:
return import_module(mod_name)
except ImportError:
raise RuntimeError(error_msg)
@contextmanager
def tmpfile(extension='', dir=None):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension, dir=dir)
os.close(handle)
os.remove(filename)
try:
yield filename
finally:
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
with ignoring(OSError):
os.remove(filename)
@contextmanager
def tmpdir(dir=None):
dirname = tempfile.mkdtemp(dir=dir)
try:
yield dirname
finally:
if os.path.exists(dirname):
if os.path.isdir(dirname):
with ignoring(OSError):
shutil.rmtree(dirname)
else:
with ignoring(OSError):
os.remove(dirname)
@contextmanager
def filetext(text, extension='', open=open, mode='w'):
with tmpfile(extension=extension) as filename:
f = open(filename, mode=mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield filename
@contextmanager
def changed_cwd(new_cwd):
old_cwd = os.getcwd()
os.chdir(new_cwd)
try:
yield
finally:
os.chdir(old_cwd)
@contextmanager
def tmp_cwd(dir=None):
with tmpdir(dir) as dirname:
with changed_cwd(dirname):
yield dirname
@contextmanager
def noop_context():
yield
def repr_long_list(seq):
"""
>>> repr_long_list(list(range(100)))
'[0, 1, 2, ..., 98, 99]'
"""
if len(seq) < 8:
return repr(seq)
else:
return repr(seq[:3])[:-1] + ', ..., ' + repr(seq[-2:])[1:]
class IndexCallable(object):
""" Provide getitem syntax for functions
>>> def inc(x):
... return x + 1
>>> I = IndexCallable(inc)
>>> I[3]
4
"""
__slots__ = 'fn',
def __init__(self, fn):
self.fn = fn
def __getitem__(self, key):
return self.fn(key)
@contextmanager
def filetexts(d, open=open, mode='t', use_tmpdir=True):
""" Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
Since this is meant for use in tests, this context manager will
automatically switch to a temporary current directory, to avoid
race conditions when running tests in parallel.
"""
with (tmp_cwd() if use_tmpdir else noop_context()):
for filename, text in d.items():
f = open(filename, 'w' + mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield list(d)
for filename in d:
if os.path.exists(filename):
with ignoring(OSError):
os.remove(filename)
compressions = {'gz': 'gzip', 'bz2': 'bz2', 'xz': 'xz'}
def infer_compression(filename):
extension = os.path.splitext(filename)[-1].strip('.')
return compressions.get(extension, None)
opens = {'gzip': GzipFile, 'bz2': BZ2File, 'xz': LZMAFile}
def open(filename, mode='rb', compression=None, **kwargs):
if compression == 'infer':
compression = infer_compression(filename)
return opens.get(compression, io.open)(filename, mode, **kwargs)
def get_bom(fn, compression=None):
"""
Get the Byte Order Mark (BOM) if it exists.
"""
boms = set((codecs.BOM_UTF16, codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE))
with open(fn, mode='rb', compression=compression) as f:
f.seek(0)
bom = f.read(2)
f.seek(0)
if bom in boms:
return bom
else:
return b''
def get_bin_linesep(encoding, linesep):
"""
Simply doing `linesep.encode(encoding)` does not always give you
    *just* the linesep bytes; for some encodings this prefixes the
linesep bytes with the BOM. This function ensures we just get the
linesep bytes.
"""
if encoding == 'utf-16':
return linesep.encode('utf-16')[2:] # [2:] strips bom
else:
return linesep.encode(encoding)
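# A minimal usage sketch for ``get_bin_linesep`` (the reprs below are Python 3
# reprs and assume a little-endian platform, where ``'\n'.encode('utf-16')``
# carries a 2-byte BOM that this helper strips):
#
# >>> get_bin_linesep('utf-8', '\n')
# b'\n'
# >>> get_bin_linesep('utf-16', '\n')
# b'\n\x00'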
def textblock(filename, start, end, compression=None, encoding=system_encoding,
linesep=os.linesep, buffersize=4096):
"""Pull out a block of text from a file given start and stop bytes.
This gets data starting/ending from the next linesep delimiter. Each block
consists of bytes in the range [start,end[, i.e. the stop byte is excluded.
If `start` is 0, then `start` corresponds to the true start byte. If
`start` is greater than 0 and does not point to the beginning of a new
line, then `start` is incremented until it corresponds to the start byte of
the next line. If `end` does not point to the beginning of a new line, then
the line that begins before `end` is included in the block although its
last byte exceeds `end`.
Examples
--------
>> with open('myfile.txt', 'wb') as f:
.. f.write('123\n456\n789\nabc')
In the example below, 1 and 10 don't line up with endlines.
>> u''.join(textblock('myfile.txt', 1, 10))
'456\n789\n'
"""
# Make sure `linesep` is not a byte string because
# `io.TextIOWrapper` in Python versions other than 2.7 dislike byte
# strings for the `newline` argument.
linesep = str(linesep)
# Get byte representation of the line separator.
bin_linesep = get_bin_linesep(encoding, linesep)
bin_linesep_len = len(bin_linesep)
if buffersize < bin_linesep_len:
error = ('`buffersize` ({0:d}) must be at least as large as the '
'number of line separator bytes ({1:d}).')
raise ValueError(error.format(buffersize, bin_linesep_len))
chunksize = end - start
with open(filename, 'rb', compression) as f:
with io.BufferedReader(f) as fb:
# If `start` does not correspond to the beginning of the file, we
# need to move the file pointer to `start - len(bin_linesep)`,
# search for the position of the next a line separator, and set
# `start` to the position after that line separator.
if start > 0:
# `start` is decremented by `len(bin_linesep)` to detect the
# case where the original `start` value corresponds to the
# beginning of a line.
start = max(0, start - bin_linesep_len)
# Set the file pointer to `start`.
fb.seek(start)
# Number of bytes to shift the file pointer before reading a
# new chunk to make sure that a multi-byte line separator, that
# is split by the chunk reader, is still detected.
shift = 1 - bin_linesep_len
while True:
buf = f.read(buffersize)
if len(buf) < bin_linesep_len:
raise StopIteration
try:
# Find the position of the next line separator and add
# `len(bin_linesep)` which yields the position of the
# first byte of the next line.
start += buf.index(bin_linesep)
start += bin_linesep_len
except ValueError:
# No line separator was found in the current chunk.
# Before reading the next chunk, we move the file
# pointer back `len(bin_linesep) - 1` bytes to make
# sure that a multi-byte line separator, that may have
# been split by the chunk reader, is still detected.
start += len(buf)
start += shift
fb.seek(shift, os.SEEK_CUR)
else:
# We have found the next line separator, so we need to
# set the file pointer to the first byte of the next
# line.
fb.seek(start)
break
with io.TextIOWrapper(fb, encoding, newline=linesep) as fbw:
# Retrieve and yield lines until the file pointer reaches
# `end`.
while start < end:
line = next(fbw)
# We need to encode the line again to get the byte length
# in order to correctly update `start`.
bin_line_len = len(line.encode(encoding))
if chunksize < bin_line_len:
error = ('`chunksize` ({0:d}) is less than the line '
'length ({1:d}). This may cause duplicate '
'processing of this line. It is advised to '
'increase `chunksize`.')
raise IOError(error.format(chunksize, bin_line_len))
yield line
start += bin_line_len
def concrete(seq):
""" Make nested iterators concrete lists
>>> data = [[1, 2], [3, 4]]
>>> seq = iter(map(iter, data))
>>> concrete(seq)
[[1, 2], [3, 4]]
"""
if isinstance(seq, Iterator):
seq = list(seq)
if isinstance(seq, (tuple, list)):
seq = list(map(concrete, seq))
return seq
def skip(func):
pass
def pseudorandom(n, p, random_state=None):
""" Pseudorandom array of integer indexes
>>> pseudorandom(5, [0.5, 0.5], random_state=123)
array([1, 0, 0, 1, 1], dtype=int8)
>>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], random_state=5)
array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8)
"""
import numpy as np
p = list(p)
cp = np.cumsum([0] + p)
assert np.allclose(1, cp[-1])
assert len(p) < 256
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
x = random_state.random_sample(n)
out = np.empty(n, dtype='i1')
for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):
out[(x >= low) & (x < high)] = i
return out
def random_state_data(n, random_state=None):
"""Return a list of arrays that can initialize
``np.random.RandomState``.
Parameters
----------
n : int
        Number of arrays to return.
random_state : int or np.random.RandomState, optional
If an int, is used to seed a new ``RandomState``.
"""
import numpy as np
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
maxuint32 = np.iinfo(np.uint32).max
return [(random_state.rand(624) * maxuint32).astype('uint32')
for i in range(n)]
def is_integer(i):
"""
>>> is_integer(6)
True
>>> is_integer(42.0)
True
>>> is_integer('abc')
False
"""
import numpy as np
if isinstance(i, (int, long)):
return True
if isinstance(i, float):
return (i).is_integer()
if issubclass(type(i), np.integer):
        return True
else:
return False
def file_size(fn, compression=None):
""" Size of a file on disk
If compressed then return the uncompressed file size
"""
if compression == 'gzip':
with open(fn, 'rb') as f:
f.seek(-4, 2)
result = struct.unpack('I', f.read(4))[0]
elif compression:
# depending on the implementation, this may be inefficient
with open(fn, 'rb', compression) as f:
result = f.seek(0, 2)
else:
result = os.stat(fn).st_size
return result
ONE_ARITY_BUILTINS = set([abs, all, any, bool, bytearray, bytes, callable, chr,
classmethod, complex, dict, dir, enumerate, eval,
float, format, frozenset, hash, hex, id, int, iter,
len, list, max, min, next, oct, open, ord, range,
repr, reversed, round, set, slice, sorted,
staticmethod, str, sum, tuple,
type, vars, zip, memoryview])
if PY3:
ONE_ARITY_BUILTINS.add(ascii) # noqa: F821
MULTI_ARITY_BUILTINS = set([compile, delattr, divmod, filter, getattr, hasattr,
isinstance, issubclass, map, pow, setattr])
def takes_multiple_arguments(func):
""" Does this function take multiple arguments?
>>> def f(x, y): pass
>>> takes_multiple_arguments(f)
True
>>> def f(x): pass
>>> takes_multiple_arguments(f)
False
>>> def f(x, y=None): pass
>>> takes_multiple_arguments(f)
False
>>> def f(*args): pass
>>> takes_multiple_arguments(f)
True
>>> class Thing(object):
... def __init__(self, a): pass
>>> takes_multiple_arguments(Thing)
False
"""
if func in ONE_ARITY_BUILTINS:
return False
elif func in MULTI_ARITY_BUILTINS:
return True
try:
spec = getargspec(func)
except:
return False
try:
is_constructor = spec.args[0] == 'self' and isinstance(func, type)
except:
is_constructor = False
if spec.varargs:
return True
if spec.defaults is None:
return len(spec.args) - is_constructor != 1
return len(spec.args) - len(spec.defaults) - is_constructor > 1
class Dispatch(object):
"""Simple single dispatch."""
def __init__(self, name=None):
self._lookup = {}
self._lazy = {}
if name:
self.__name__ = name
def register(self, type, func=None):
"""Register dispatch of `func` on arguments of type `type`"""
def wrapper(func):
if isinstance(type, tuple):
for t in type:
self.register(t, func)
else:
self._lookup[type] = func
return func
return wrapper(func) if func is not None else wrapper
def register_lazy(self, toplevel, func=None):
"""
Register a registration function which will be called if the
*toplevel* module (e.g. 'pandas') is ever loaded.
"""
def wrapper(func):
self._lazy[toplevel] = func
return func
return wrapper(func) if func is not None else wrapper
def __call__(self, arg):
# Fast path with direct lookup on type
lk = self._lookup
typ = type(arg)
try:
impl = lk[typ]
except KeyError:
pass
else:
return impl(arg)
# Is a lazy registration function present?
toplevel, _, _ = typ.__module__.partition('.')
try:
register = self._lazy.pop(toplevel)
except KeyError:
pass
else:
register()
return self(arg) # recurse
# Walk the MRO and cache the lookup result
for cls in inspect.getmro(typ)[1:]:
if cls in lk:
lk[typ] = lk[cls]
return lk[cls](arg)
raise TypeError("No dispatch for {0} type".format(typ))
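# A minimal usage sketch for ``Dispatch`` (the ``normalize`` dispatcher and the
# functions registered on it are hypothetical, for illustration only):
#
# >>> normalize = Dispatch('normalize')
# >>> @normalize.register(list)
# ... def normalize_list(x):
# ...     return sorted(x)
# >>> @normalize.register((int, float))
# ... def normalize_number(x):
# ...     return x
# >>> normalize([3, 1, 2])
# [1, 2, 3]
# >>> normalize(2.5)
# 2.5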
def ensure_not_exists(filename):
"""
Ensure that a file does not exist.
"""
try:
os.unlink(filename)
except OSError as e:
if e.errno != ENOENT:
raise
def _skip_doctest(line):
# NumPy docstring contains cursor and comment only example
stripped = line.strip()
if stripped == '>>>' or stripped.startswith('>>> #'):
return stripped
elif '>>>' in stripped:
return line + ' # doctest: +SKIP'
else:
return line
def skip_doctest(doc):
if doc is None:
return ''
return '\n'.join([_skip_doctest(line) for line in doc.split('\n')])
def derived_from(original_klass, version=None, ua_args=[]):
"""Decorator to attach original class's docstring to the wrapped method.
Parameters
----------
original_klass: type
Original class which the method is derived from
version : str
Original package version which supports the wrapped method
ua_args : list
List of keywords which Dask doesn't support. Keywords existing in
original but not in Dask will automatically be added.
"""
def wrapper(method):
method_name = method.__name__
try:
# do not use wraps here, as it hides keyword arguments displayed
# in the doc
original_method = getattr(original_klass, method_name)
doc = original_method.__doc__
if doc is None:
doc = ''
try:
method_args = getargspec(method).args
original_args = getargspec(original_method).args
not_supported = [m for m in original_args if m not in method_args]
except TypeError:
not_supported = []
if len(ua_args) > 0:
not_supported.extend(ua_args)
if len(not_supported) > 0:
note = ("\n Notes\n -----\n"
" Dask doesn't supports following argument(s).\n\n")
args = ''.join([' * {0}\n'.format(a) for a in not_supported])
doc = doc + note + args
doc = skip_doctest(doc)
method.__doc__ = doc
return method
except AttributeError:
module_name = original_klass.__module__.split('.')[0]
@functools.wraps(method)
def wrapped(*args, **kwargs):
msg = "Base package doesn't support '{0}'.".format(method_name)
if version is not None:
msg2 = " Use {0} {1} or later to use this method."
msg += msg2.format(module_name, version)
raise NotImplementedError(msg)
return wrapped
return wrapper
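# A minimal usage sketch for ``derived_from`` (``MyFrame`` and the pandas
# import are hypothetical, for illustration only):
#
# >>> import pandas as pd
# >>> class MyFrame(object):
# ...     @derived_from(pd.DataFrame)
# ...     def sum(self, axis=None):
# ...         pass
#
# ``MyFrame.sum`` then carries ``pd.DataFrame.sum``'s docstring, with its
# doctests marked to be skipped and, when detectable, a note listing keyword
# arguments that the wrapping method does not accept.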
def funcname(func):
"""Get the name of a function."""
# functools.partial
if isinstance(func, functools.partial):
return funcname(func.func)
# methodcaller
if isinstance(func, methodcaller):
return func.method
module_name = getattr(func, '__module__', None) or ''
type_name = getattr(type(func), '__name__', None) or ''
# toolz.curry
if 'toolz' in module_name and 'curry' == type_name:
return func.func_name
# multipledispatch objects
if 'multipledispatch' in module_name and 'Dispatcher' == type_name:
return func.name
# All other callables
try:
name = func.__name__
if name == '<lambda>':
return 'lambda'
return name
except:
return str(func)
def ensure_bytes(s):
""" Turn string or bytes to bytes
>>> ensure_bytes(u'123')
'123'
>>> ensure_bytes('123')
'123'
>>> ensure_bytes(b'123')
'123'
"""
if isinstance(s, bytes):
return s
if hasattr(s, 'encode'):
return s.encode()
msg = "Object %s is neither a bytes object nor has an encode method"
raise TypeError(msg % s)
def ensure_unicode(s):
    """ Turn string or bytes to unicode
>>> ensure_unicode(u'123')
u'123'
>>> ensure_unicode('123')
u'123'
>>> ensure_unicode(b'123')
u'123'
"""
if isinstance(s, unicode):
return s
if hasattr(s, 'decode'):
return s.decode()
    msg = "Object %s is neither a unicode string nor has a decode method"
raise TypeError(msg % s)
def digit(n, k, base):
"""
>>> digit(1234, 0, 10)
4
>>> digit(1234, 1, 10)
3
>>> digit(1234, 2, 10)
2
>>> digit(1234, 3, 10)
1
"""
return n // base**k % base
def insert(tup, loc, val):
"""
>>> insert(('a', 'b', 'c'), 0, 'x')
('x', 'b', 'c')
"""
L = list(tup)
L[loc] = val
return tuple(L)
def build_name_function(max_int):
""" Returns a function that receives a single integer
and returns it as a string padded by enough zero characters
to align with maximum possible integer
>>> name_f = build_name_function(57)
>>> name_f(7)
'07'
>>> name_f(31)
'31'
>>> build_name_function(1000)(42)
'0042'
>>> build_name_function(999)(42)
'042'
>>> build_name_function(0)(0)
'0'
"""
# handle corner cases max_int is 0 or exact power of 10
max_int += 1e-8
pad_length = int(math.ceil(math.log10(max_int)))
def name_function(i):
return str(i).zfill(pad_length)
return name_function
def infer_storage_options(urlpath, inherit_storage_options=None):
""" Infer storage options from URL path and merge it with existing storage
options.
Parameters
----------
urlpath: str or unicode
Either local absolute file path or URL (hdfs://namenode:8020/file.csv)
    inherit_storage_options: dict (optional)
Its contents will get merged with the inferred information from the
given path
Returns
-------
Storage options dict.
Examples
--------
>>> infer_storage_options('/mnt/datasets/test.csv') # doctest: +SKIP
{"protocol": "file", "path", "/mnt/datasets/test.csv"}
>>> infer_storage_options(
... 'hdfs://username:pwd@node:123/mnt/datasets/test.csv?q=1',
... inherit_storage_options={'extra': 'value'}) # doctest: +SKIP
{"protocol": "hdfs", "username": "username", "password": "pwd",
"host": "node", "port": 123, "path": "/mnt/datasets/test.csv",
"url_query": "q=1", "extra": "value"}
"""
# Handle Windows paths including disk name in this special case
if re.match(r'^[a-zA-Z]:[\\/]', urlpath):
return {'protocol': 'file',
'path': urlpath}
parsed_path = urlsplit(urlpath)
protocol = parsed_path.scheme or 'file'
path = parsed_path.path
if protocol == 'file':
# Special case parsing file protocol URL on Windows according to:
# https://msdn.microsoft.com/en-us/library/jj710207.aspx
windows_path = re.match(r'^/([a-zA-Z])[:|]([\\/].*)$', path)
if windows_path:
path = '%s:%s' % windows_path.groups()
inferred_storage_options = {
'protocol': protocol,
'path': path,
}
if parsed_path.netloc:
# Parse `hostname` from netloc manually because `parsed_path.hostname`
# lowercases the hostname which is not always desirable (e.g. in S3):
# https://github.com/dask/dask/issues/1417
inferred_storage_options['host'] = parsed_path.netloc.rsplit('@', 1)[-1].rsplit(':', 1)[0]
if parsed_path.port:
inferred_storage_options['port'] = parsed_path.port
if parsed_path.username:
inferred_storage_options['username'] = parsed_path.username
if parsed_path.password:
inferred_storage_options['password'] = parsed_path.password
if parsed_path.query:
inferred_storage_options['url_query'] = parsed_path.query
if parsed_path.fragment:
inferred_storage_options['url_fragment'] = parsed_path.fragment
if inherit_storage_options:
if set(inherit_storage_options) & set(inferred_storage_options):
raise KeyError("storage options (%r) and path url options (%r) "
"collision is detected"
% (inherit_storage_options, inferred_storage_options))
inferred_storage_options.update(inherit_storage_options)
return inferred_storage_options
def dependency_depth(dsk):
import toolz
deps, _ = get_deps(dsk)
@toolz.memoize
def max_depth_by_deps(key):
if not deps[key]:
return 1
d = 1 + max(max_depth_by_deps(dep_key) for dep_key in deps[key])
return d
return max(max_depth_by_deps(dep_key) for dep_key in deps.keys())
def eq_strict(a, b):
"""Returns True if both values have the same type and are equal."""
if type(a) is type(b):
return a == b
return False
def memory_repr(num):
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
def put_lines(buf, lines):
if any(not isinstance(x, unicode) for x in lines):
lines = [unicode(x) for x in lines]
buf.write('\n'.join(lines))
_method_cache = {}
class methodcaller(object):
"""Return a callable object that calls the given method on its operand.
Unlike the builtin `methodcaller`, this class is serializable"""
__slots__ = ('method',)
func = property(lambda self: self.method) # For `funcname` to work
def __new__(cls, method):
if method in _method_cache:
return _method_cache[method]
self = object.__new__(cls)
self.method = method
_method_cache[method] = self
return self
def __call__(self, obj, *args, **kwargs):
return getattr(obj, self.method)(*args, **kwargs)
def __reduce__(self):
return (methodcaller, (self.method,))
def __str__(self):
return "<%s: %s>" % (self.__class__.__name__, self.method)
__repr__ = __str__
class MethodCache(object):
"""Attribute access on this object returns a methodcaller for that
attribute.
Examples
--------
>>> a = [1, 3, 3]
>>> M.count(a, 3) == a.count(3)
True
"""
__getattr__ = staticmethod(methodcaller)
__dir__ = lambda self: list(_method_cache)
M = MethodCache()
class SerializableLock(object):
_locks = WeakValueDictionary()
""" A Serializable per-process Lock
This wraps a normal ``threading.Lock`` object and satisfies the same
interface. However, this lock can also be serialized and sent to different
processes. It will not block concurrent operations between processes (for
    this you should look at ``multiprocessing.Lock`` or ``locket.lock_file``)
but will consistently deserialize into the same lock.
So if we make a lock in one process::
lock = SerializableLock()
And then send it over to another process multiple times::
bytes = pickle.dumps(lock)
a = pickle.loads(bytes)
b = pickle.loads(bytes)
Then the deserialized objects will operate as though they were the same
lock, and collide as appropriate.
This is useful for consistently protecting resources on a per-process
level.
The creation of locks is itself not threadsafe.
"""
def __init__(self, token=None):
self.token = token or str(uuid.uuid4())
if self.token in SerializableLock._locks:
self.lock = SerializableLock._locks[self.token]
else:
self.lock = Lock()
SerializableLock._locks[self.token] = self.lock
def acquire(self, *args):
return self.lock.acquire(*args)
def release(self, *args):
return self.lock.release(*args)
def __enter__(self):
self.lock.__enter__()
def __exit__(self, *args):
self.lock.__exit__(*args)
@property
def locked(self):
        return self.lock.locked()
def __getstate__(self):
return self.token
def __setstate__(self, token):
self.__init__(token)
def __str__(self):
return "<%s: %s>" % (self.__class__.__name__, self.token)
__repr__ = __str__
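# A minimal usage sketch: copies unpickled from the same ``SerializableLock``
# share one underlying ``threading.Lock`` within a process (keyed by ``token``).
#
# >>> import pickle
# >>> lock = SerializableLock()
# >>> a = pickle.loads(pickle.dumps(lock))
# >>> b = pickle.loads(pickle.dumps(lock))
# >>> a.lock is b.lock is lock.lock
# True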
def effective_get(get=None, collection=None):
"""Get the effective get method used in a given situation"""
collection_get = collection._default_get if collection else None
return get or _globals.get('get') or collection_get
def get_scheduler_lock(get=None, collection=None):
"""Get an instance of the appropriate lock for a certain situation based on
scheduler used."""
from . import multiprocessing
actual_get = effective_get(get, collection)
if actual_get == multiprocessing.get:
return mp.Manager().Lock()
return SerializableLock()
def ensure_dict(d):
if type(d) is dict:
return d
elif hasattr(d, 'dicts'):
result = {}
for dd in d.dicts.values():
result.update(dd)
return result
return dict(d)
| bsd-3-clause |
musically-ut/statsmodels | statsmodels/graphics/mosaicplot.py | 20 | 26989 | """Create a mosaic plot from a contingency table.
It allows one to visualize multivariate categorical data in a rigorous
and informative way.
see the docstring of the mosaic function for more information.
"""
# Author: Enrico Giampieri - 21 Jan 2013
from __future__ import division
from statsmodels.compat.python import (iteritems, iterkeys, lrange, string_types, lzip,
itervalues, zip, range)
import numpy as np
from statsmodels.compat.collections import OrderedDict
from itertools import product
from numpy import iterable, r_, cumsum, array
from statsmodels.graphics import utils
from pandas import DataFrame
__all__ = ["mosaic"]
def _normalize_split(proportion):
"""
    return a list of proportions of the available space given the division;
    if only a number is given, it will assume a split in two pieces
"""
if not iterable(proportion):
if proportion == 0:
proportion = array([0.0, 1.0])
elif proportion >= 1:
proportion = array([1.0, 0.0])
elif proportion < 0:
raise ValueError("proportions should be positive,"
"given value: {}".format(proportion))
else:
proportion = array([proportion, 1.0 - proportion])
proportion = np.asarray(proportion, dtype=float)
if np.any(proportion < 0):
raise ValueError("proportions should be positive,"
"given value: {}".format(proportion))
if np.allclose(proportion, 0):
raise ValueError("at least one proportion should be "
"greater than zero".format(proportion))
# ok, data are meaningful, so go on
if len(proportion) < 2:
return array([0.0, 1.0])
left = r_[0, cumsum(proportion)]
left /= left[-1] * 1.0
return left
def _split_rect(x, y, width, height, proportion, horizontal=True, gap=0.05):
"""
    Split the given rectangle in n segments whose proportion is specified
    along the given axis. If a gap is inserted, they will be separated by a
    certain amount of space, retaining the relative proportion between them.
    A gap of 1 corresponds to a plot that is half void and the remaining half
    space is proportionally divided among the pieces.
"""
x, y, w, h = float(x), float(y), float(width), float(height)
if (w < 0) or (h < 0):
        raise ValueError("dimension of the square less than "
                         "zero w={} h={}".format(w, h))
proportions = _normalize_split(proportion)
# extract the starting point and the dimension of each subdivision
# in respect to the unit square
starting = proportions[:-1]
amplitude = proportions[1:] - starting
# how much each extrema is going to be displaced due to gaps
starting += gap * np.arange(len(proportions) - 1)
# how much the squares plus the gaps are extended
extension = starting[-1] + amplitude[-1] - starting[0]
# normalize everything for fit again in the original dimension
starting /= extension
amplitude /= extension
# bring everything to the original square
starting = (x if horizontal else y) + starting * (w if horizontal else h)
amplitude = amplitude * (w if horizontal else h)
# create each 4-tuple for each new block
results = [(s, y, a, h) if horizontal else (x, s, w, a)
for s, a in zip(starting, amplitude)]
return results
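# A minimal sketch of what ``_split_rect`` produces: splitting the unit square
# into two equal parts along the horizontal axis, with no gap, yields the two
# tiles (x, y, width, height) = (0, 0, 0.5, 1) and (0.5, 0, 0.5, 1):
#
# >>> _split_rect(0, 0, 1, 1, [0.5, 0.5], horizontal=True, gap=0)
# [(0.0, 0.0, 0.5, 1.0), (0.5, 0.0, 0.5, 1.0)]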
def _reduce_dict(count_dict, partial_key):
"""
Make partial sum on a counter dict.
Given a match for the beginning of the category, it will sum each value.
"""
L = len(partial_key)
count = sum(v for k, v in iteritems(count_dict) if k[:L] == partial_key)
return count
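# Example (illustrative): summing every count whose key starts with ('a',).
#
# >>> counts = {('a', 'x'): 1, ('a', 'y'): 2, ('b', 'x'): 3}
# >>> _reduce_dict(counts, ('a',))
# 3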
def _key_splitting(rect_dict, keys, values, key_subset, horizontal, gap):
"""
Given a dictionary where each entry is a rectangle, a list of key and
    value (count of elements in each category) it splits each rect accordingly,
    as long as the key starts with the tuple key_subset. The other keys are
returned without modification.
"""
result = OrderedDict()
L = len(key_subset)
for name, (x, y, w, h) in iteritems(rect_dict):
if key_subset == name[:L]:
# split base on the values given
divisions = _split_rect(x, y, w, h, values, horizontal, gap)
for key, rect in zip(keys, divisions):
result[name + (key,)] = rect
else:
result[name] = (x, y, w, h)
return result
def _tuplify(obj):
    """convert an object into a tuple of strings (even if it is not iterable,
like a single integer number, but keep the string healthy)
"""
if np.iterable(obj) and not isinstance(obj, string_types):
res = tuple(str(o) for o in obj)
else:
res = (str(obj),)
return res
def _categories_level(keys):
    """use the OrderedDict to implement a simple ordered set
    and return each level of each category
[[key_1_level_1,key_2_level_1],[key_1_level_2,key_2_level_2]]
"""
res = []
for i in zip(*(keys)):
tuplefied = _tuplify(i)
res.append(list(OrderedDict([(j, None) for j in tuplefied])))
return res
def _hierarchical_split(count_dict, horizontal=True, gap=0.05):
"""
Split a square in a hierarchical way given a contingency table.
Hierarchically split the unit square in alternate directions
in proportion to the subdivision contained in the contingency table
    count_dict. This is the function that actually performs the tiling
    for the creation of the mosaic plot. If the gap array has been specified
    it will insert a corresponding amount of space (proportional to the
    unit length), while retaining the proportionality of the tiles.
Parameters
----------
count_dict : dict
Dictionary containing the contingency table.
Each category should contain a non-negative number
        with a tuple as index. It expects all the combinations
        of keys to be represented; if that is not true, it will
        automatically consider the missing values as 0
horizontal : bool
The starting direction of the split (by default along
the horizontal axis)
gap : float or array of floats
The list of gaps to be applied on each subdivision.
        If the length of the given array is less than the number
        of subcategories (or if it's a single number) it will extend
it with exponentially decreasing gaps
Returns
----------
base_rect : dict
A dictionary containing the result of the split.
To each key is associated a 4-tuple of coordinates
that are required to create the corresponding rectangle:
0 - x position of the lower left corner
1 - y position of the lower left corner
2 - width of the rectangle
3 - height of the rectangle
"""
# this is the unit square that we are going to divide
base_rect = OrderedDict([(tuple(), (0, 0, 1, 1))])
# get the list of each possible value for each level
categories_levels = _categories_level(list(iterkeys(count_dict)))
L = len(categories_levels)
# recreate the gaps vector starting from an int
if not np.iterable(gap):
gap = [gap / 1.5 ** idx for idx in range(L)]
# extend if it's too short
if len(gap) < L:
last = gap[-1]
gap = list(*gap) + [last / 1.5 ** idx for idx in range(L)]
# trim if it's too long
gap = gap[:L]
    # put the count dictionary in order for the keys
# this will allow some code simplification
count_ordered = OrderedDict([(k, count_dict[k])
for k in list(product(*categories_levels))])
for cat_idx, cat_enum in enumerate(categories_levels):
# get the partial key up to the actual level
base_keys = list(product(*categories_levels[:cat_idx]))
for key in base_keys:
# for each partial and each value calculate how many
# observation we have in the counting dictionary
part_count = [_reduce_dict(count_ordered, key + (partial,))
for partial in cat_enum]
            # reduce the gap for subsequent levels
new_gap = gap[cat_idx]
# split the given subkeys in the rectangle dictionary
base_rect = _key_splitting(base_rect, cat_enum, part_count, key,
horizontal, new_gap)
horizontal = not horizontal
return base_rect
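# A minimal sketch of the tiling: a 2x2 contingency table with equal counts
# yields four tiles keyed by the category tuples, each value being an
# (x, y, width, height) rectangle inside the unit square.
#
# >>> data = {('a', 'x'): 1, ('a', 'y'): 1, ('b', 'x'): 1, ('b', 'y'): 1}
# >>> rects = _hierarchical_split(data, gap=0)
# >>> sorted(rects)
# [('a', 'x'), ('a', 'y'), ('b', 'x'), ('b', 'y')]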
def _single_hsv_to_rgb(hsv):
"""Transform a color from the hsv space to the rgb."""
from matplotlib.colors import hsv_to_rgb
return hsv_to_rgb(array(hsv).reshape(1, 1, 3)).reshape(3)
def _create_default_properties(data):
    """Create the default properties of the mosaic given the data
    first it will vary the color hue (first category) then the color
    saturation (second category) and then the color value
    (third category). If a fourth category is found, it will put
    decoration on the rectangle. Doesn't manage more than four
    levels of categories
"""
categories_levels = _categories_level(list(iterkeys(data)))
Nlevels = len(categories_levels)
# first level, the hue
L = len(categories_levels[0])
# hue = np.linspace(1.0, 0.0, L+1)[:-1]
hue = np.linspace(0.0, 1.0, L + 2)[:-2]
# second level, the saturation
L = len(categories_levels[1]) if Nlevels > 1 else 1
saturation = np.linspace(0.5, 1.0, L + 1)[:-1]
# third level, the value
L = len(categories_levels[2]) if Nlevels > 2 else 1
value = np.linspace(0.5, 1.0, L + 1)[:-1]
# fourth level, the hatch
L = len(categories_levels[3]) if Nlevels > 3 else 1
hatch = ['', '/', '-', '|', '+'][:L + 1]
# convert in list and merge with the levels
hue = lzip(list(hue), categories_levels[0])
saturation = lzip(list(saturation),
categories_levels[1] if Nlevels > 1 else [''])
value = lzip(list(value),
categories_levels[2] if Nlevels > 2 else [''])
hatch = lzip(list(hatch),
categories_levels[3] if Nlevels > 3 else [''])
# create the properties dictionary
properties = {}
for h, s, v, t in product(hue, saturation, value, hatch):
hv, hn = h
sv, sn = s
vv, vn = v
tv, tn = t
level = (hn,) + ((sn,) if sn else tuple())
level = level + ((vn,) if vn else tuple())
level = level + ((tn,) if tn else tuple())
hsv = array([hv, sv, vv])
prop = {'color': _single_hsv_to_rgb(hsv), 'hatch': tv, 'lw': 0}
properties[level] = prop
return properties
def _normalize_data(data, index):
"""normalize the data to a dict with tuples of strings as keys
right now it works with:
0 - dictionary (or equivalent mappable)
1 - pandas.Series with simple or hierarchical indexes
2 - numpy.ndarrays
3 - everything that can be converted to a numpy array
4 - pandas.DataFrame (via the _normalize_dataframe function)
"""
# if data is a dataframe we need to take a completely new road
# before coming back here. Use the hasattr to avoid importing
# pandas explicitly
if hasattr(data, 'pivot') and hasattr(data, 'groupby'):
data = _normalize_dataframe(data, index)
index = None
# can it be used as a dictionary?
try:
items = list(iteritems(data))
except AttributeError:
# ok, I cannot use the data as a dictionary
# Try to convert it to a numpy array, or die trying
data = np.asarray(data)
temp = OrderedDict()
for idx in np.ndindex(data.shape):
name = tuple(i for i in idx)
temp[name] = data[idx]
data = temp
items = list(iteritems(data))
# make all the keys a tuple, even if simple numbers
data = OrderedDict([_tuplify(k), v] for k, v in items)
categories_levels = _categories_level(list(iterkeys(data)))
# fill the void in the counting dictionary
indexes = product(*categories_levels)
contingency = OrderedDict([(k, data.get(k, 0)) for k in indexes])
data = contingency
# reorder the keys order according to the one specified by the user
# or if the index is None convert it into a simple list
# right now it doesn't do any check, but can be modified in the future
index = lrange(len(categories_levels)) if index is None else index
contingency = OrderedDict()
for key, value in iteritems(data):
new_key = tuple(key[i] for i in index)
contingency[new_key] = value
data = contingency
return data
def _normalize_dataframe(dataframe, index):
    """Take a pandas DataFrame and count the elements present in the
given columns, return a hierarchical index on those columns
"""
#groupby the given keys, extract the same columns and count the element
# then collapse them with a mean
data = dataframe[index].dropna()
grouped = data.groupby(index, sort=False)
counted = grouped[index].count()
averaged = counted.mean(axis=1)
return averaged
def _statistical_coloring(data):
    """evaluate colors from the independence properties of the matrix
    It will encounter problems if one category has all zeros
"""
data = _normalize_data(data, None)
categories_levels = _categories_level(list(iterkeys(data)))
Nlevels = len(categories_levels)
total = 1.0 * sum(v for v in itervalues(data))
# count the proportion of observation
# for each level that has the given name
# at each level
levels_count = []
for level_idx in range(Nlevels):
proportion = {}
for level in categories_levels[level_idx]:
proportion[level] = 0.0
for key, value in iteritems(data):
if level == key[level_idx]:
proportion[level] += value
proportion[level] /= total
levels_count.append(proportion)
# for each key I obtain the expected value
    # and its standard deviation from a binomial distribution
    # under the hypothesis of independence
expected = {}
for key, value in iteritems(data):
base = 1.0
for i, k in enumerate(key):
base *= levels_count[i][k]
expected[key] = base * total, np.sqrt(total * base * (1.0 - base))
# now we have the standard deviation of distance from the
# expected value for each tile. We create the colors from this
sigmas = dict((k, (data[k] - m) / s) for k, (m, s) in iteritems(expected))
props = {}
for key, dev in iteritems(sigmas):
red = 0.0 if dev < 0 else (dev / (1 + dev))
blue = 0.0 if dev > 0 else (dev / (-1 + dev))
green = (1.0 - red - blue) / 2.0
hatch = 'x' if dev > 2 else 'o' if dev < -2 else ''
props[key] = {'color': [red, green, blue], 'hatch': hatch}
return props
def _get_position(x, w, h, W):
if W == 0:
return x
return (x + w / 2.0) * w * h / W
def _create_labels(rects, horizontal, ax, rotation):
    """find the position of the label for each value of each category
    right now it supports only up to four categories
ax: the axis on which the label should be applied
rotation: the rotation list for each side
"""
categories = _categories_level(list(iterkeys(rects)))
if len(categories) > 4:
        msg = ("maximum of 4 levels supported for axes labeling... and 4 "
               "is already a lot of levels, are you sure you need them all?")
raise NotImplementedError(msg)
labels = {}
#keep it fixed as will be used a lot of times
items = list(iteritems(rects))
vertical = not horizontal
#get the axis ticks and labels locator to put the correct values!
ax2 = ax.twinx()
ax3 = ax.twiny()
#this is the order of execution for horizontal disposition
ticks_pos = [ax.set_xticks, ax.set_yticks, ax3.set_xticks, ax2.set_yticks]
ticks_lab = [ax.set_xticklabels, ax.set_yticklabels,
ax3.set_xticklabels, ax2.set_yticklabels]
#for the vertical one, rotate it by one
if vertical:
ticks_pos = ticks_pos[1:] + ticks_pos[:1]
ticks_lab = ticks_lab[1:] + ticks_lab[:1]
#clean them
for pos, lab in zip(ticks_pos, ticks_lab):
pos([])
lab([])
#for each level, for each value in the level, take the mean of all
#the sublevel that correspond to that partial key
for level_idx, level in enumerate(categories):
#this dictionary keep the labels only for this level
level_ticks = dict()
for value in level:
#to which level it should refer to get the preceding
#values of labels? it's rather a tricky question...
#this is dependent on the side. It's a very crude management
#but I couldn't think a more general way...
if horizontal:
if level_idx == 3:
index_select = [-1, -1, -1]
else:
index_select = [+0, -1, -1]
else:
if level_idx == 3:
index_select = [+0, -1, +0]
else:
index_select = [-1, -1, -1]
#now I create the base key name and append the current value
#It will search on all the rects to find the corresponding one
#and use them to evaluate the mean position
basekey = tuple(categories[i][index_select[i]]
for i in range(level_idx))
basekey = basekey + (value,)
subset = dict((k, v) for k, v in items
if basekey == k[:level_idx + 1])
#now I extract the center of all the tiles and make a weighted
#mean of all these center on the area of the tile
#this should give me the (more or less) correct position
#of the center of the category
vals = list(itervalues(subset))
W = sum(w * h for (x, y, w, h) in vals)
x_lab = sum(_get_position(x, w, h, W) for (x, y, w, h) in vals)
y_lab = sum(_get_position(y, h, w, W) for (x, y, w, h) in vals)
#now base on the ordering, select which position to keep
#needs to be written in a more general form of 4 level are enough?
#should give also the horizontal and vertical alignment
side = (level_idx + vertical) % 4
level_ticks[value] = y_lab if side % 2 else x_lab
#now we add the labels of this level to the correct axis
ticks_pos[level_idx](list(itervalues(level_ticks)))
ticks_lab[level_idx](list(iterkeys(level_ticks)),
rotation=rotation[level_idx])
return labels
def mosaic(data, index=None, ax=None, horizontal=True, gap=0.005,
properties=lambda key: None, labelizer=None,
title='', statistic=False, axes_label=True,
label_rotation=0.0):
"""Create a mosaic plot from a contingency table.
It allows to visualize multivariate categorical data in a rigorous
and informative way.
Parameters
----------
data : dict, pandas.Series, np.ndarray, pandas.DataFrame
The contingency table that contains the data.
Each category should contain a non-negative number
        with a tuple as index. It expects all the combinations
        of keys to be represented; if that is not true, it will
        automatically consider the missing values as 0. The order
of the keys will be the same as the one of insertion.
If a dict of a Series (or any other dict like object)
is used, it will take the keys as labels. If a
        np.ndarray is provided, it will generate simple
        numerical labels.
index: list, optional
Gives the preferred order for the category ordering. If not specified
will default to the given order. It doesn't support named indexes
for hierarchical Series. If a DataFrame is provided, it expects
a list with the name of the columns.
ax : matplotlib.Axes, optional
The graph where display the mosaic. If not given, will
create a new figure
horizontal : bool, optional (default True)
The starting direction of the split (by default along
the horizontal axis)
gap : float or array of floats
The list of gaps to be applied on each subdivision.
        If the length of the given array is less than the number
        of subcategories (or if it's a single number) it will extend
it with exponentially decreasing gaps
labelizer : function (key) -> string, optional
A function that generate the text to display at the center of
each tile base on the key of that tile
properties : function (key) -> dict, optional
A function that for each tile in the mosaic take the key
of the tile and returns the dictionary of properties
of the generated Rectangle, like color, hatch or similar.
        A default properties set will be provided for the keys whose
color has not been defined, and will use color variation to help
visually separates the various categories. It should return None
to indicate that it should use the default property for the tile.
A dictionary of the properties for each key can be passed,
and it will be internally converted to the correct function
statistic: bool, optional (default False)
if true will use a crude statistical model to give colors to the plot.
        If the tile has a content that is more than 2 standard deviations
        from the expected value under the independence hypothesis, it will
        go from green to red (for positive deviations, blue otherwise) and
        will acquire hatching when it crosses 3 sigma.
title: string, optional
The title of the axis
axes_label: boolean, optional
Show the name of each value of each category
on the axis (default) or hide them.
label_rotation: float or list of float
the rotation of the axis label (if present). If a list is given
each axis can have a different rotation
Returns
----------
fig : matplotlib.Figure
The generate figure
rects : dict
A dictionary that has the same keys of the original
dataset, that holds a reference to the coordinates of the
tile and the Rectangle that represent it
See Also
----------
A Brief History of the Mosaic Display
Michael Friendly, York University, Psychology Department
Journal of Computational and Graphical Statistics, 2001
Mosaic Displays for Loglinear Models.
Michael Friendly, York University, Psychology Department
Proceedings of the Statistical Graphics Section, 1992, 61-68.
    Mosaic displays for multi-way contingency tables.
Michael Friendly, York University, Psychology Department
Journal of the american statistical association
March 1994, Vol. 89, No. 425, Theory and Methods
Examples
----------
The most simple use case is to take a dictionary and plot the result
>>> data = {'a': 10, 'b': 15, 'c': 16}
>>> mosaic(data, title='basic dictionary')
>>> pylab.show()
A more useful example is given by a dictionary with multiple indices.
    In this case we use a wider gap for a better visual separation of the
resulting plot
>>> data = {('a', 'b'): 1, ('a', 'c'): 2, ('d', 'b'): 3, ('d', 'c'): 4}
>>> mosaic(data, gap=0.05, title='complete dictionary')
>>> pylab.show()
The same data can be given as a simple or hierarchical indexed Series
>>> rand = np.random.random
>>> from itertools import product
>>>
>>> tuples = list(product(['bar', 'baz', 'foo', 'qux'], ['one', 'two']))
>>> index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
>>> data = pd.Series(rand(8), index=index)
>>> mosaic(data, title='hierarchical index series')
>>> pylab.show()
    The third accepted data structure is the np array, for which a
very simple index will be created.
>>> rand = np.random.random
>>> data = 1+rand((2,2))
>>> mosaic(data, title='random non-labeled array')
>>> pylab.show()
If you need to modify the labeling and the coloring you can give
    a function to create the labels and one with the graphical properties
starting from the key tuple
>>> data = {'a': 10, 'b': 15, 'c': 16}
>>> props = lambda key: {'color': 'r' if 'a' in key else 'gray'}
>>> labelizer = lambda k: {('a',): 'first', ('b',): 'second',
('c',): 'third'}[k]
>>> mosaic(data, title='colored dictionary',
properties=props, labelizer=labelizer)
>>> pylab.show()
Using a DataFrame as source, specifying the name of the columns of interest
>>> gender = ['male', 'male', 'male', 'female', 'female', 'female']
>>> pet = ['cat', 'dog', 'dog', 'cat', 'dog', 'cat']
>>> data = pandas.DataFrame({'gender': gender, 'pet': pet})
>>> mosaic(data, ['pet', 'gender'])
>>> pylab.show()
"""
if isinstance(data, DataFrame) and index is None:
raise ValueError("You must pass an index if data is a DataFrame."
" See examples.")
from pylab import Rectangle
fig, ax = utils.create_mpl_ax(ax)
# normalize the data to a dict with tuple of strings as keys
data = _normalize_data(data, index)
# split the graph into different areas
rects = _hierarchical_split(data, horizontal=horizontal, gap=gap)
# if there is no specified way to create the labels
# create a default one
if labelizer is None:
labelizer = lambda k: "\n".join(k)
if statistic:
default_props = _statistical_coloring(data)
else:
default_props = _create_default_properties(data)
if isinstance(properties, dict):
color_dict = properties
properties = lambda key: color_dict.get(key, None)
for k, v in iteritems(rects):
# create each rectangle and put a label on it
x, y, w, h = v
conf = properties(k)
props = conf if conf else default_props[k]
text = labelizer(k)
Rect = Rectangle((x, y), w, h, label=text, **props)
ax.add_patch(Rect)
ax.text(x + w / 2, y + h / 2, text, ha='center',
va='center', size='smaller')
#creating the labels on the axis
    #or clearing it
if axes_label:
if np.iterable(label_rotation):
rotation = label_rotation
else:
rotation = [label_rotation] * 4
labels = _create_labels(rects, horizontal, ax, rotation)
else:
ax.set_xticks([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_yticklabels([])
ax.set_title(title)
return fig, rects
| bsd-3-clause |
shusenl/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 65 | 50308 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
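# --- Illustrative sketch (not part of the original module) ------------------
# ``_prepare_fit_binary`` / ``fit_binary`` above implement the one-vs-all
# label encoding: for class index i, samples of that class get the target +1
# and every other sample gets -1. The tiny demo below reproduces that
# encoding on made-up data; the name ``_demo_ova_encoding`` is invented for
# illustration only and is not called anywhere in the module.
def _demo_ova_encoding():
    import numpy as np
    classes = np.array([0, 1, 2])
    y = np.array([0, 1, 2, 1, 0])
    encodings = []
    for cls in classes:
        y_i = np.ones(y.shape, dtype=np.float64)  # +1 everywhere ...
        y_i[y != cls] = -1.0                      # ... except for non-members
        encodings.append(y_i)
    return encodings  # e.g. class 0 -> [ 1., -1., -1., -1.,  1.]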
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
            constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
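# --- Illustrative sketch (not part of the original module) ------------------
# ``fit`` accepts per-sample weights which, per its docstring, are combined
# with any ``class_weight`` passed to the constructor. A minimal made-up
# example using the ``SGDClassifier`` subclass defined below; the helper name
# and all data are invented for illustration only.
def _demo_sample_and_class_weights():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(40, 2)
    y = (X[:, 0] > 0).astype(int)
    weights = np.where(y == 1, 2.0, 1.0)           # up-weight positive samples
    clf = SGDClassifier(class_weight={0: 1.0, 1: 0.5}, random_state=0)
    clf.fit(X, y, sample_weight=weights)           # effective weight is the product
    return clf.coef_, clf.intercept_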
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
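# --- Illustrative usage sketch (not part of the original module) ------------
# Out-of-core training with ``partial_fit`` and probability estimates with
# ``loss="log"``, as described in the docstrings above. Data, batch size and
# the helper name are made up; note that the first call to ``partial_fit``
# must receive the full set of class labels.
def _demo_sgd_classifier_streaming():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(100, 4)
    y = rng.randint(0, 3, size=100)
    all_classes = np.unique(y)                     # required on the first call
    clf = SGDClassifier(loss="log", alpha=1e-4, random_state=0)
    for start in range(0, X.shape[0], 20):         # feed mini-batches of 20
        batch = slice(start, start + 20)
        clf.partial_fit(X[batch], y[batch], classes=all_classes)
    return clf.predict_proba(X[:5])                # (5, 3) class probabilities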
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
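# --- Illustrative sketch (not part of the original module) ------------------
# The learning-rate schedules referenced in the docstrings, written out as
# plain arithmetic. ``constant`` keeps eta0 fixed and ``invscaling`` is the
# regressor default (eta0=0.01, power_t=0.25); the ``optimal`` schedule also
# depends on a heuristic offset t0 and is omitted here. The helper name is
# invented for illustration only.
def _demo_learning_rate_schedules(t, eta0=0.01, power_t=0.25):
    constant = eta0                       # eta = eta0
    invscaling = eta0 / (t ** power_t)    # eta = eta0 / pow(t, power_t)
    return constant, invscaling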
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
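# --- Illustrative usage sketch (not part of the original module) ------------
# Fitting ``SGDRegressor`` on standardized, made-up data, with ``average`` set
# to an integer so that weight averaging starts after roughly that many
# samples have been seen (see the ``average`` parameter above). The helper
# name and data are invented for illustration only.
def _demo_sgd_regressor():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(200, 3)
    X = (X - X.mean(axis=0)) / X.std(axis=0)       # SGD prefers centered, scaled data
    y = np.dot(X, np.array([1.0, -2.0, 0.5])) + 0.1 * rng.randn(200)
    reg = SGDRegressor(average=10, random_state=0)
    reg.fit(X, y)
    return reg.coef_, reg.intercept_               # averaged weights after fitting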
| bsd-3-clause |
tgsmith61591/skutil | skutil/odr/dqrutl.py | 1 | 5910 | from __future__ import print_function, division, absolute_import
import numpy as np
from skutil.odr import dqrsl # what happens if we make this absolute?
from sklearn.utils import check_array
from sklearn.base import BaseEstimator
from numpy.linalg import matrix_rank
# WARNING: there is little-to-no validation of input in these functions,
# and crashes may be caused by inappropriate usage. Use with care...
__all__ = [
'qr_decomposition',
'QRDecomposition'
]
def _validate_matrix_size(n, p):
if n * p > 2147483647:
raise ValueError('too many elements for Fortran LINPACK routine')
def _safecall(fun, *args, **kwargs):
"""A method to call a LAPACK or LINPACK subroutine internally"""
fun(*args, **kwargs)
def qr_decomposition(X, job=1):
"""Performs the QR decomposition using LINPACK, BLAS and LAPACK
Fortran subroutines.
Parameters
----------
X : array_like, shape (n_samples, n_features)
The matrix to decompose
job : int, optional (default=1)
Whether to perform pivoting. 0 is False, any other value
will be coerced to 1 (True).
Returns
-------
X : np.ndarray, shape=(n_samples, n_features)
        The decomposed matrix, overwritten in LINPACK's compact QR form
rank : int
The rank of the matrix
qraux : np.ndarray, shape=(n_features,)
Contains further information required to recover
the orthogonal part of the decomposition.
pivot : np.ndarray, shape=(n_features,)
The pivot array, or None if not ``job``
"""
X = check_array(X, dtype='numeric', order='F', copy=True)
n, p = X.shape
# check on size
_validate_matrix_size(n, p)
rank = matrix_rank(X)
# validate job:
job_ = 0 if not job else 1
qraux, pivot, work = (np.zeros(p, dtype=np.double, order='F'),
# can't use arange, because need fortran order ('order' not kw in arange)
np.array([i for i in range(1, p + 1)], dtype=np.int, order='F'),
np.zeros(p, dtype=np.double, order='F'))
# sanity checks
assert qraux.shape[0] == p, 'expected qraux to be of length %i' % p
assert pivot.shape[0] == p, 'expected pivot to be of length %i' % p
assert work.shape[0] == p, 'expected work to be of length %i' % p
# call the fortran module IN PLACE
_safecall(dqrsl.dqrdc, X, n, n, p, qraux, pivot, work, job_)
# do returns
return (X,
rank,
qraux,
(pivot - 1) if job_ else None) # subtract one because pivot started at 1 for the fortran
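# --- Illustrative usage sketch (not part of the original module) ------------
# Calling ``qr_decomposition`` directly on a small made-up matrix. Per the
# docstring above it returns the decomposed matrix in LINPACK's compact form,
# the numerical rank, the ``qraux`` auxiliary vector and the zero-based pivot
# (or None when ``job=0``). The helper name is invented for illustration only.
def _demo_qr_decomposition():
    rng = np.random.RandomState(0)
    A = rng.randn(6, 3)
    qr, rank, qraux, pivot = qr_decomposition(A, job=1)
    # qr.shape == (6, 3); rank == 3 for this full-rank matrix
    # qraux.shape == (3,); pivot is some permutation of [0, 1, 2]
    return qr, rank, qraux, pivot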
def _qr_R(qr):
"""Extract the R matrix from a QR decomposition"""
min_dim = min(qr.shape)
return qr[:min_dim + 1, :]
class QRDecomposition(BaseEstimator):
"""Performs the QR decomposition using LINPACK, BLAS and LAPACK
Fortran subroutines, and provides an interface for other useful
QR utility methods.
Parameters
----------
X : array_like, shape (n_samples, n_features)
The matrix to decompose
pivot : int, optional (default=1)
Whether to perform pivoting. 0 is False, any other value
will be coerced to 1 (True).
Attributes
----------
qr : array_like, shape (n_samples, n_features)
The decomposed matrix
qraux : array_like, shape (n_features,)
Contains further information required to recover
the orthogonal part of the decomposition.
pivot : array_like, shape (n_features,)
The pivots, if pivot was set to 1, else None
rank : int
The rank of the input matrix
"""
def __init__(self, X, pivot=1):
self.job_ = 0 if not pivot else 1
self._decompose(X)
def _decompose(self, X):
"""Decomposes the matrix"""
# perform the decomposition
self.qr, self.rank, self.qraux, self.pivot = qr_decomposition(X, self.job_)
def get_coef(self, X):
qr, qraux = self.qr, self.qraux
n, p = qr.shape
# sanity check
assert isinstance(qr, np.ndarray), 'internal error: QR should be a np.ndarray but got %s' % type(qr)
assert isinstance(qraux, np.ndarray), 'internal error: qraux should be a np.ndarray but got %s' % type(qraux)
# validate input array
X = check_array(X, dtype='numeric', copy=True, order='F')
nx, ny = X.shape
if nx != n:
raise ValueError('qr and X must have same number of rows')
# check on size
_validate_matrix_size(n, p)
# get the rank of the decomposition
k = self.rank
# get ix vector
# if p > n:
# ix = np.ones(n + (p - n)) * np.nan
# ix[:n] = np.arange(n) # i.e., array([0,1,2,nan,nan,nan])
# else:
# ix = np.arange(n)
# set up the structures to alter
coef, info = (np.zeros((k, ny), dtype=np.double, order='F'),
np.zeros(1, dtype=np.int, order='F'))
# call the fortran module IN PLACE
_safecall(dqrsl.dqrcf, qr, n, k, qraux, X, ny, coef, 0)
# post-processing
# if k < p:
# cf = np.ones((p,ny)) * np.nan
# cf[self.pivot[np.arange(k)], :] = coef
return coef if not k < p else coef[self.pivot[np.arange(k)], :]
def get_rank(self):
"""Get the rank of the decomposition.
Returns
-------
self.rank : int
The rank of the decomposition
"""
return self.rank
def get_R(self):
"""Get the R matrix from the decomposition.
Returns
-------
r : np.ndarray
The R portion of the decomposed matrix.
"""
r = _qr_R(self.qr)
return r
def get_R_rank(self):
"""Get the rank of the R matrix.
Returns
-------
rank : int
The rank of the R matrix
"""
return matrix_rank(self.get_R())
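# --- Illustrative usage sketch (not part of the original module) ------------
# ``QRDecomposition`` wraps the routine above and exposes the R block, the
# rank and, via ``get_coef``, least-squares coefficients in the spirit of R's
# ``qr.coef`` (judging by the implementation). Data, shapes and the helper
# name are made up; with pivoting enabled the coefficient rows may follow the
# pivoted column order.
def _demo_qr_class():
    rng = np.random.RandomState(42)
    A = rng.randn(20, 3)
    y = np.dot(A, np.array([[2.0], [-1.0], [0.5]]))   # (20, 1) response
    dec = QRDecomposition(A, pivot=1)
    r = dec.get_R()               # R block as extracted by _qr_R above
    rank = dec.get_rank()         # 3 for this full-rank matrix
    coef = dec.get_coef(y)        # (rank, 1) least-squares coefficients
    return r, rank, coef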
| bsd-3-clause |