repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
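A minimal sketch of how rows with this schema could be loaded and filtered with pandas; the file name `code_dataset.jsonl` is a hypothetical placeholder, not part of the dataset.

import pandas as pd

# Each record holds: repo_name, path, copies, size, content, license.
df = pd.read_json("code_dataset.jsonl", lines=True)
mit_rows = df[df["license"] == "mit"]
print(mit_rows[["repo_name", "path", "size"]].head())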
votti/PyLaTeX | examples/basic.py | 3 | 1310 | #!/usr/bin/python
"""
This example shows basic document generation functionality.
.. :copyright: (c) 2014 by Jelte Fennema.
:license: MIT, see License for more details.
"""
# begin-doc-include
from pylatex import Document, Section, Subsection
from pylatex.utils import italic, escape_latex
def fill_document(doc):
"""Add a section, a subsection and some text to the document.
:param doc: the document
:type doc: :class:`pylatex.document.Document` instance
"""
with doc.create(Section('A section')):
doc.append('Some regular text and some ' + italic('italic text. '))
with doc.create(Subsection('A subsection')):
doc.append(escape_latex('Also some crazy characters: $&#{}'))
if __name__ == '__main__':
# Basic document
doc = Document('basic')
fill_document(doc)
doc.generate_pdf()
doc.generate_tex()
# Document with `\maketitle` command activated
doc = Document(author='Author', date='01/01/01', title='Title',
maketitle=True)
fill_document(doc)
doc.generate_pdf('basic_maketitle', clean=False)
# Add stuff to the document
doc.append(Section('A second section'))
doc.append('Some text.')
doc.generate_pdf('basic_maketitle2')
tex = doc.dumps() # The document as string in LaTeX syntax
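    # Hypothetical follow-up (not part of the original example): the LaTeX
    # source string returned by dumps() could also be written out by hand.
    # with open('basic_dump.tex', 'w') as out:
    #     out.write(tex)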
| mit |
navijo/FlOYBD | Flask/utils/generalFunctions.py | 2 | 5672 | from utils.cylinders import CylindersKml
from utils.cylindersExt import CylindersKmlExtended
from cassandra.cluster import Cluster
from utils import sparkFunctions
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import pandas as pd
import json
import datetime
import time
import requests
import logging
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT,level=logging.INFO)
logger = logging.getLogger('sparkFunctions')
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def saveKEY(date, key):
cluster = Cluster(['192.168.246.236'])
session = cluster.connect("dev")
valid_date = date + datetime.timedelta(days=90)
session.execute("TRUNCATE api_key")
session.execute("INSERT INTO api_key (creation_date,valid_until,\"apiKey\") VALUES (%s, %s, %s)"
, [date, valid_date, key])
def getKey():
cluster = Cluster(['192.168.246.236'])
session = cluster.connect("dev")
rows = session.execute('SELECT * FROM api_key')
apiKey = ''
jsonList = []
    for row in rows:
        jsonData = {}  # fresh dict per row so each appended entry is distinct
jsonData['creation_date'] = str(row[0].strftime("%Y-%m-%d %H:%M:%S"))
jsonData['api_key'] = row[1]
jsonData['valid_until'] = str(row[2].strftime("%Y-%m-%d %H:%M:%S"))
jsonList.append(jsonData)
return jsonList
def getApiKey():
cluster = Cluster(['192.168.246.236'])
session = cluster.connect("dev")
rows = session.execute('SELECT * FROM api_key')
apiKey = ''
for row in rows:
apiKey = row[1]
return apiKey
def dataframeToJson(dataFrame):
return dataFrame.toPandas().to_json(orient='records', lines=True)
def dataFrameToJsonStr(dataFrame):
return dataFrame.toPandas().reset_index().to_json(path_or_buf=None, orient='records')
def generateAllStationsKml(weatherData, stations, fileName):
weatherJsonData = dataFrameToJsonStr(weatherData)
weatherJsonData = json.loads(weatherJsonData)
finalData = []
jsonString = []
for row in weatherJsonData:
stationData = sparkFunctions.getStationInfo(stations, row.get("station_id"))
stationJsonData = dataframeToJson(stationData)
preparedData = prepareJson(json.dumps(row), stationJsonData)
jsonString.append(preparedData)
finalData.append(jsonString)
cilinders = CylindersKmlExtended(fileName, finalData)
cilinders.makeKMZWithTourAndRotation()
def generateKml(weatherData, stationData, fileName):
finalData = []
weatherJsonData = dataframeToJson(weatherData)
stationJsonData = dataframeToJson(stationData)
jsonString = prepareJson(weatherJsonData, stationJsonData)
finalData.append(jsonString)
cilinders = CylindersKml(fileName, finalData)
cilinders.makeKMLWithTourAndRotation()
def prepareJson(weatherData, stationData):
stationData = json.loads(stationData)
latitude = stationData["latitude"]
longitude = stationData["longitude"]
coordinates = {"lat": latitude, "lng": longitude}
name = stationData["name"]
calculatedData = json.loads(weatherData)
maxTemp = calculatedData["max_temp"]
medTemp = calculatedData["med_temp"]
minTemp = calculatedData["min_temp"]
temps = [maxTemp, medTemp, minTemp]
finalData = {"name": name, "description": temps, "coordinates": coordinates, "extra": ""}
return finalData
def getCurrentWeather(station_id):
logger.info("Getting current weather for station: " + station_id)
global api_key, querystring, headers, base_url
api_key = getApiKey()
querystring = {"api_key": api_key}
headers = {'cache-control': "no-cache"}
base_url = "https://opendata.aemet.es/opendata"
currentWeather = getData(base_url + "/api/observacion/convencional/datos/estacion/" + station_id)
parsedCurrentWeatherJson = {}
if currentWeather != 0 and currentWeather is not None:
precip = currentWeather[0].get("prec")
min_temp = currentWeather[0].get("tamin")
max_temp = currentWeather[0].get("tamax")
max_pres = currentWeather[0].get("pres")
insolation = currentWeather[0].get("inso")
parsedCurrentWeatherJson["station_id"] = station_id
parsedCurrentWeatherJson["precip"] = precip
parsedCurrentWeatherJson["max_temp"] = max_temp
parsedCurrentWeatherJson["med_temp"] = (float(max_temp) + float(min_temp)) / 2
parsedCurrentWeatherJson["min_temp"] = min_temp
parsedCurrentWeatherJson["max_pres"] = max_pres
parsedCurrentWeatherJson["min_pres"] = max_pres
parsedCurrentWeatherJson["insolation"] = insolation if insolation is not None else 0
return parsedCurrentWeatherJson
def getData(url):
""" Make the request to the api """
try:
response = requests.request("GET", url, headers=headers, params=querystring, verify=False)
if response:
jsonResponse = response.json()
if jsonResponse.get('estado') == 200:
link = jsonResponse.get('datos')
data = requests.request("GET", link, verify=False)
if (data.status_code == 200):
return data.json()
else:
return 0
elif jsonResponse.get('estado') == 429:
# Sleep until next minute
logger.error("####Sleeping")
time.sleep(60)
logger.error("####Waked up!!")
return getData(url)
except requests.exceptions.ConnectionError:
logger.error("####ERROR!! => Sleeping")
time.sleep(120)
logger.error("####Waked up!!")
return getData(url)
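# Hypothetical usage sketch (not part of the original module): fetch and print
# the parsed current-weather summary for one station; the station id below is
# an illustrative placeholder only.
if __name__ == '__main__':
    summary = getCurrentWeather("1234X")
    print(json.dumps(summary, indent=2))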
| mit |
maaskola/GPy | GPy/core/parameterization/transformations.py | 10 | 20673 | # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from .domains import _POSITIVE,_NEGATIVE, _BOUNDED
import weakref
import sys
_exp_lim_val = np.finfo(np.float64).max
_lim_val = 36.0
epsilon = np.finfo(np.float64).resolution
#===============================================================================
# Fixing constants
__fixed__ = "fixed"
FIXED = False
UNFIXED = True
#===============================================================================
class Transformation(object):
domain = None
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance or cls._instance.__class__ is not cls:
cls._instance = super(Transformation, cls).__new__(cls, *args, **kwargs)
return cls._instance
def f(self, opt_param):
raise NotImplementedError
def finv(self, model_param):
raise NotImplementedError
def log_jacobian(self, model_param):
"""
compute the log of the jacobian of f, evaluated at f(x)= model_param
"""
raise NotImplementedError
def log_jacobian_grad(self, model_param):
"""
        compute the derivative of the log of the jacobian of f, evaluated at f(x)= model_param
"""
raise NotImplementedError
def gradfactor(self, model_param, dL_dmodel_param):
""" df(opt_param)_dopt_param evaluated at self.f(opt_param)=model_param, times the gradient dL_dmodel_param,
i.e.:
define
.. math::
            \frac{\partial L}{\partial f}\,\left.\frac{\partial f(x)}{\partial x}\right|_{x=f^{-1}(f)}
"""
raise NotImplementedError
def gradfactor_non_natural(self, model_param, dL_dmodel_param):
return self.gradfactor(model_param, dL_dmodel_param)
def initialize(self, f):
""" produce a sensible initial value for f(x)"""
raise NotImplementedError
def plot(self, xlabel=r'transformed $\theta$', ylabel=r'$\theta$', axes=None, *args,**kw):
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
import matplotlib.pyplot as plt
from ...plotting.matplot_dep import base_plots
x = np.linspace(-8,8)
base_plots.meanplot(x, self.f(x), *args, ax=axes, **kw)
axes = plt.gca()
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
def __str__(self):
raise NotImplementedError
def __repr__(self):
return self.__class__.__name__
class Logexp(Transformation):
domain = _POSITIVE
def f(self, x):
return np.where(x>_lim_val, x, np.log1p(np.exp(np.clip(x, -_lim_val, _lim_val)))) + epsilon
#raises overflow warning: return np.where(x>_lim_val, x, np.log(1. + np.exp(x)))
def finv(self, f):
return np.where(f>_lim_val, f, np.log(np.exp(f+1e-20) - 1.))
def gradfactor(self, f, df):
return np.einsum('i,i->i', df, np.where(f>_lim_val, 1., 1. - np.exp(-f)))
def initialize(self, f):
if np.any(f < 0.):
print("Warning: changing parameters to satisfy constraints")
return np.abs(f)
def log_jacobian(self, model_param):
return np.where(model_param>_lim_val, model_param, np.log(np.exp(model_param+1e-20) - 1.)) - model_param
def log_jacobian_grad(self, model_param):
return 1./(np.exp(model_param)-1.)
def __str__(self):
return '+ve'
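# Illustrative check (an added sketch, not part of GPy): Logexp maps an
# unconstrained optimiser value x to a positive parameter via the softplus
# log(1 + exp(x)), and finv inverts that mapping.
def _logexp_roundtrip_demo():
    t = Logexp()
    x = np.array([-3.0, 0.0, 3.0])
    p = t.f(x)                              # strictly positive parameters
    assert np.all(p > 0)
    assert np.allclose(t.f(t.finv(p)), p)   # round trip recovers the parameters
    return p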
class Exponent(Transformation):
domain = _POSITIVE
def f(self, x):
return np.where(x<_lim_val, np.where(x>-_lim_val, np.exp(x), np.exp(-_lim_val)), np.exp(_lim_val))
def finv(self, x):
return np.log(x)
def gradfactor(self, f, df):
return np.einsum('i,i->i', df, f)
def initialize(self, f):
if np.any(f < 0.):
print("Warning: changing parameters to satisfy constraints")
return np.abs(f)
def log_jacobian(self, model_param):
return np.log(model_param)
def log_jacobian_grad(self, model_param):
return 1./model_param
def __str__(self):
return '+ve'
class NormalTheta(Transformation):
"Do not use, not officially supported!"
_instances = []
def __new__(cls, mu_indices=None, var_indices=None):
"Do not use, not officially supported!"
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if np.all(instance().mu_indices==mu_indices, keepdims=False) and np.all(instance().var_indices==var_indices, keepdims=False):
return instance()
o = super(Transformation, cls).__new__(cls, mu_indices, var_indices)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, mu_indices, var_indices):
self.mu_indices = mu_indices
self.var_indices = var_indices
def f(self, theta):
# In here abs is only a trick to make sure the numerics are ok.
# The variance will never go below zero, but at initialization we need to make sure
# that the values are ok
# Before:
theta[self.var_indices] = np.abs(-.5/theta[self.var_indices])
#theta[self.var_indices] = np.exp(-.5/theta[self.var_indices])
theta[self.mu_indices] *= theta[self.var_indices]
return theta # which is now {mu, var}
def finv(self, muvar):
# before:
varp = muvar[self.var_indices]
muvar[self.mu_indices] /= varp
muvar[self.var_indices] = -.5/varp
#muvar[self.var_indices] = -.5/np.log(varp)
return muvar # which is now {theta1, theta2}
def gradfactor(self, muvar, dmuvar):
mu = muvar[self.mu_indices]
var = muvar[self.var_indices]
#=======================================================================
# theta gradients
# This works and the gradient checks!
dmuvar[self.mu_indices] *= var
dmuvar[self.var_indices] *= 2*(var)**2
dmuvar[self.var_indices] += 2*dmuvar[self.mu_indices]*mu
#=======================================================================
return dmuvar # which is now the gradient multiplicator for {theta1, theta2}
def initialize(self, f):
if np.any(f[self.var_indices] < 0.):
print("Warning: changing parameters to satisfy constraints")
f[self.var_indices] = np.abs(f[self.var_indices])
return f
def __str__(self):
return "theta"
def __getstate__(self):
return [self.mu_indices, self.var_indices]
def __setstate__(self, state):
self.mu_indices = state[0]
self.var_indices = state[1]
class NormalNaturalAntti(NormalTheta):
"Do not use, not officially supported!"
_instances = []
def __new__(cls, mu_indices=None, var_indices=None):
"Do not use, not officially supported!"
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if np.all(instance().mu_indices==mu_indices, keepdims=False) and np.all(instance().var_indices==var_indices, keepdims=False):
return instance()
o = super(Transformation, cls).__new__(cls, mu_indices, var_indices)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, mu_indices, var_indices):
self.mu_indices = mu_indices
self.var_indices = var_indices
def gradfactor(self, muvar, dmuvar):
mu = muvar[self.mu_indices]
var = muvar[self.var_indices]
#=======================================================================
# theta gradients
# This works and the gradient checks!
dmuvar[self.mu_indices] *= var
dmuvar[self.var_indices] *= 2*var**2#np.einsum('i,i,i,i->i', dmuvar[self.var_indices], [2], var, var)
#=======================================================================
return dmuvar # which is now the gradient multiplicator
def initialize(self, f):
if np.any(f[self.var_indices] < 0.):
print("Warning: changing parameters to satisfy constraints")
f[self.var_indices] = np.abs(f[self.var_indices])
return f
def __str__(self):
return "natantti"
class NormalEta(Transformation):
"Do not use, not officially supported!"
_instances = []
def __new__(cls, mu_indices=None, var_indices=None):
"Do not use, not officially supported!"
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if np.all(instance().mu_indices==mu_indices, keepdims=False) and np.all(instance().var_indices==var_indices, keepdims=False):
return instance()
o = super(Transformation, cls).__new__(cls, mu_indices, var_indices)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, mu_indices, var_indices):
self.mu_indices = mu_indices
self.var_indices = var_indices
def f(self, theta):
theta[self.var_indices] = np.abs(theta[self.var_indices] - theta[self.mu_indices]**2)
return theta # which is now {mu, var}
def finv(self, muvar):
muvar[self.var_indices] += muvar[self.mu_indices]**2
return muvar # which is now {eta1, eta2}
def gradfactor(self, muvar, dmuvar):
mu = muvar[self.mu_indices]
#=======================================================================
# Lets try natural gradients instead: Not working with bfgs... try stochastic!
dmuvar[self.mu_indices] -= 2*mu*dmuvar[self.var_indices]
#=======================================================================
return dmuvar # which is now the gradient multiplicator
def initialize(self, f):
if np.any(f[self.var_indices] < 0.):
print("Warning: changing parameters to satisfy constraints")
f[self.var_indices] = np.abs(f[self.var_indices])
return f
def __str__(self):
return "eta"
class NormalNaturalThroughTheta(NormalTheta):
"Do not use, not officially supported!"
_instances = []
def __new__(cls, mu_indices=None, var_indices=None):
"Do not use, not officially supported!"
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if np.all(instance().mu_indices==mu_indices, keepdims=False) and np.all(instance().var_indices==var_indices, keepdims=False):
return instance()
o = super(Transformation, cls).__new__(cls, mu_indices, var_indices)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, mu_indices, var_indices):
self.mu_indices = mu_indices
self.var_indices = var_indices
def gradfactor(self, muvar, dmuvar):
mu = muvar[self.mu_indices]
var = muvar[self.var_indices]
#=======================================================================
# This is just eta direction:
dmuvar[self.mu_indices] -= 2*mu*dmuvar[self.var_indices]
#=======================================================================
#=======================================================================
# This is by going through theta fully and then going into eta direction:
#dmu = dmuvar[self.mu_indices]
#dmuvar[self.var_indices] += dmu*mu*(var + 4/var)
#=======================================================================
return dmuvar # which is now the gradient multiplicator
def gradfactor_non_natural(self, muvar, dmuvar):
mu = muvar[self.mu_indices]
var = muvar[self.var_indices]
#=======================================================================
# theta gradients
# This works and the gradient checks!
dmuvar[self.mu_indices] *= var
dmuvar[self.var_indices] *= 2*(var)**2
dmuvar[self.var_indices] += 2*dmuvar[self.mu_indices]*mu
#=======================================================================
return dmuvar # which is now the gradient multiplicator for {theta1, theta2}
def __str__(self):
return "natgrad"
class NormalNaturalWhooot(NormalTheta):
"Do not use, not officially supported!"
_instances = []
def __new__(cls, mu_indices=None, var_indices=None):
"Do not use, not officially supported!"
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if np.all(instance().mu_indices==mu_indices, keepdims=False) and np.all(instance().var_indices==var_indices, keepdims=False):
return instance()
o = super(Transformation, cls).__new__(cls, mu_indices, var_indices)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, mu_indices, var_indices):
self.mu_indices = mu_indices
self.var_indices = var_indices
def gradfactor(self, muvar, dmuvar):
#mu = muvar[self.mu_indices]
#var = muvar[self.var_indices]
#=======================================================================
# This is just eta direction:
#dmuvar[self.mu_indices] -= 2*mu*dmuvar[self.var_indices]
#=======================================================================
#=======================================================================
# This is by going through theta fully and then going into eta direction:
#dmu = dmuvar[self.mu_indices]
#dmuvar[self.var_indices] += dmu*mu*(var + 4/var)
#=======================================================================
return dmuvar # which is now the gradient multiplicator
def __str__(self):
return "natgrad"
class NormalNaturalThroughEta(NormalEta):
"Do not use, not officially supported!"
_instances = []
def __new__(cls, mu_indices=None, var_indices=None):
"Do not use, not officially supported!"
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if np.all(instance().mu_indices==mu_indices, keepdims=False) and np.all(instance().var_indices==var_indices, keepdims=False):
return instance()
o = super(Transformation, cls).__new__(cls, mu_indices, var_indices)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, mu_indices, var_indices):
self.mu_indices = mu_indices
self.var_indices = var_indices
def gradfactor(self, muvar, dmuvar):
mu = muvar[self.mu_indices]
var = muvar[self.var_indices]
#=======================================================================
# theta gradients
# This works and the gradient checks!
dmuvar[self.mu_indices] *= var
dmuvar[self.var_indices] *= 2*(var)**2
dmuvar[self.var_indices] += 2*dmuvar[self.mu_indices]*mu
#=======================================================================
return dmuvar
def __str__(self):
return "natgrad"
class LogexpNeg(Transformation):
domain = _POSITIVE
def f(self, x):
return np.where(x>_lim_val, -x, -np.log(1. + np.exp(np.clip(x, -np.inf, _lim_val))))
#raises overflow warning: return np.where(x>_lim_val, x, np.log(1. + np.exp(x)))
def finv(self, f):
return np.where(f>_lim_val, 0, np.log(np.exp(-f) - 1.))
def gradfactor(self, f, df):
return np.einsum('i,i->i', df, np.where(f>_lim_val, -1, -1 + np.exp(-f)))
def initialize(self, f):
if np.any(f < 0.):
print("Warning: changing parameters to satisfy constraints")
return np.abs(f)
def __str__(self):
return '+ve'
class NegativeLogexp(Transformation):
domain = _NEGATIVE
logexp = Logexp()
def f(self, x):
return -self.logexp.f(x) # np.log(1. + np.exp(x))
def finv(self, f):
return self.logexp.finv(-f) # np.log(np.exp(-f) - 1.)
def gradfactor(self, f, df):
return np.einsum('i,i->i', df, -self.logexp.gradfactor(-f))
def initialize(self, f):
return -self.logexp.initialize(f) # np.abs(f)
def __str__(self):
return '-ve'
class LogexpClipped(Logexp):
max_bound = 1e100
min_bound = 1e-10
log_max_bound = np.log(max_bound)
log_min_bound = np.log(min_bound)
domain = _POSITIVE
_instances = []
def __new__(cls, lower=1e-6, *args, **kwargs):
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if instance().lower == lower:
return instance()
o = super(Transformation, cls).__new__(cls, lower, *args, **kwargs)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, lower=1e-6):
self.lower = lower
def f(self, x):
exp = np.exp(np.clip(x, self.log_min_bound, self.log_max_bound))
f = np.log(1. + exp)
# if np.isnan(f).any():
# import ipdb;ipdb.set_trace()
return np.clip(f, self.min_bound, self.max_bound)
def finv(self, f):
        return np.log(np.exp(f) - 1.)
def gradfactor(self, f, df):
ef = np.exp(f) # np.clip(f, self.min_bound, self.max_bound))
gf = (ef - 1.) / ef
return np.einsum('i,i->i', df, gf) # np.where(f < self.lower, 0, gf)
def initialize(self, f):
if np.any(f < 0.):
print("Warning: changing parameters to satisfy constraints")
return np.abs(f)
def __str__(self):
return '+ve_c'
class NegativeExponent(Exponent):
domain = _NEGATIVE
def f(self, x):
        return -Exponent.f(self, x)
def finv(self, f):
        return Exponent.finv(self, -f)
def gradfactor(self, f, df):
return np.einsum('i,i->i', df, f)
def initialize(self, f):
        return -Exponent.initialize(self, f)  # np.abs(f)
def __str__(self):
return '-ve'
class Square(Transformation):
domain = _POSITIVE
def f(self, x):
return x ** 2
def finv(self, x):
return np.sqrt(x)
def gradfactor(self, f, df):
return np.einsum('i,i->i', df, 2 * np.sqrt(f))
def initialize(self, f):
return np.abs(f)
def __str__(self):
return '+sq'
class Logistic(Transformation):
domain = _BOUNDED
_instances = []
def __new__(cls, lower=1e-6, upper=1e-6, *args, **kwargs):
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if instance().lower == lower and instance().upper == upper:
return instance()
newfunc = super(Transformation, cls).__new__
if newfunc is object.__new__:
o = newfunc(cls)
else:
o = newfunc(cls, lower, upper, *args, **kwargs)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, lower, upper):
assert lower < upper
self.lower, self.upper = float(lower), float(upper)
self.difference = self.upper - self.lower
def f(self, x):
if (x<-300.).any():
x = x.copy()
x[x<-300.] = -300.
return self.lower + self.difference / (1. + np.exp(-x))
def finv(self, f):
return np.log(np.clip(f - self.lower, 1e-10, np.inf) / np.clip(self.upper - f, 1e-10, np.inf))
def gradfactor(self, f, df):
return np.einsum('i,i->i', df, (f - self.lower) * (self.upper - f) / self.difference)
def initialize(self, f):
if np.any(np.logical_or(f < self.lower, f > self.upper)):
print("Warning: changing parameters to satisfy constraints")
#return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(f * 0.), f)
#FIXME: Max, zeros_like right?
return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(np.zeros_like(f)), f)
def __str__(self):
return '{},{}'.format(self.lower, self.upper)
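# Illustrative check (an added sketch, not part of GPy): Logistic squashes an
# unconstrained value into the open interval (lower, upper) with a scaled
# sigmoid, and finv maps a bounded value back.
def _logistic_roundtrip_demo():
    t = Logistic(0.0, 10.0)
    x = np.array([-2.0, 0.0, 2.0])
    b = t.f(x)                              # values strictly inside (0, 10)
    assert np.all((b > 0.0) & (b < 10.0))
    assert np.allclose(t.finv(b), x)        # round trip recovers x
    return b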
| bsd-3-clause |
madjelan/scikit-learn | benchmarks/bench_plot_approximate_neighbors.py | 85 | 6377 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, the accuracy of LSHForest queries is measured for various
hyper-parameters and index sizes.
Second, the speed up of LSHForest queries compared to the brute force
method of exact nearest neighbors is measured for the
aforementioned settings. In general, the speed up increases as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
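# Toy illustration (added comment, not part of the benchmark): the accuracy above
# is the mean fraction of approximate neighbors that also appear in the exact
# neighbor lists, e.g. with
#   approx = np.array([[1, 2, 3], [4, 5, 9]])
#   exact  = np.array([[1, 2, 3], [4, 5, 6]])
# np.mean([np.in1d(a, e).mean() for a, e in zip(approx, exact)]) gives ~0.83.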
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
p1 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[0])
p2 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[1])
p3 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[2])
labels = ['n_estimators=' + str(params_list[0]['n_estimators']) +
', n_candidates=' + str(params_list[0]['n_candidates']),
'n_estimators=' + str(params_list[1]['n_estimators']) +
', n_candidates=' + str(params_list[1]['n_candidates']),
'n_estimators=' + str(params_list[2]['n_estimators']) +
', n_candidates=' + str(params_list[2]['n_candidates'])]
# Plot precision
plt.figure()
plt.legend((p1, p2, p3), (labels[0], labels[1], labels[2]),
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend((p1, p2, p3), (labels[0], labels[1], labels[2]),
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
| bsd-3-clause |
AunShiLord/sympy | sympy/interactive/session.py | 1 | 15016 | """Tools for setting up interactive sessions. """
from __future__ import print_function, division
from sympy.external import import_module
from sympy.interactive.printing import init_printing
preexec_source = """\
from __future__ import division
from sympy import *
x, y, z, t = symbols('x y z t')
k, m, n = symbols('k m n', integer=True)
f, g, h = symbols('f g h', cls=Function)
init_printing()
"""
verbose_message = """\
These commands were executed:
%(source)s
Documentation can be found at http://docs.sympy.org/%(version)s
"""
no_ipython = """\
Couldn't locate IPython. Having IPython installed is greatly recommended.
See http://ipython.scipy.org for more details. If you use Debian/Ubuntu,
just install the 'ipython' package and start isympy again.
"""
def _make_message(ipython=True, quiet=False, source=None):
"""Create a banner for an interactive session. """
from sympy import __version__ as sympy_version
from sympy.polys.domains import GROUND_TYPES
from sympy.utilities.misc import ARCH
from sympy import SYMPY_DEBUG
import sys
import os
python_version = "%d.%d.%d" % sys.version_info[:3]
if ipython:
shell_name = "IPython"
else:
shell_name = "Python"
info = ['ground types: %s' % GROUND_TYPES]
cache = os.getenv('SYMPY_USE_CACHE')
if cache is not None and cache.lower() == 'no':
info.append('cache: off')
if SYMPY_DEBUG:
info.append('debugging: on')
args = shell_name, sympy_version, python_version, ARCH, ', '.join(info)
message = "%s console for SymPy %s (Python %s-%s) (%s)\n" % args
if not quiet:
if source is None:
source = preexec_source
_source = ""
for line in source.split('\n')[:-1]:
if not line:
_source += '\n'
else:
_source += '>>> ' + line + '\n'
doc_version = sympy_version
if doc_version.find('-git') >= 0:
doc_version = "dev"
else:
doc_version = "%s.%s.%s/" % tuple(doc_version.split('.')[:3])
message += '\n' + verbose_message % {'source': _source,
'version': doc_version}
return message
def int_to_Integer(s):
"""
Wrap integer literals with Integer.
This is based on the decistmt example from
http://docs.python.org/library/tokenize.html.
Only integer literals are converted. Float literals are left alone.
Example
=======
>>> from __future__ import division
>>> from sympy.interactive.session import int_to_Integer
>>> from sympy import Integer
>>> s = '1.2 + 1/2 - 0x12 + a1'
>>> int_to_Integer(s)
'1.2 +Integer (1 )/Integer (2 )-Integer (0x12 )+a1 '
>>> s = 'print (1/2)'
>>> int_to_Integer(s)
'print (Integer (1 )/Integer (2 ))'
>>> exec(s)
0.5
>>> exec(int_to_Integer(s))
1/2
"""
from tokenize import generate_tokens, untokenize, NUMBER, NAME, OP
from sympy.core.compatibility import StringIO
def _is_int(num):
"""
Returns true if string value num (with token NUMBER) represents an integer.
"""
# XXX: Is there something in the standard library that will do this?
if '.' in num or 'j' in num.lower() or 'e' in num.lower():
return False
return True
result = []
g = generate_tokens(StringIO(s).readline) # tokenize the string
for toknum, tokval, _, _, _ in g:
if toknum == NUMBER and _is_int(tokval): # replace NUMBER tokens
result.extend([
(NAME, 'Integer'),
(OP, '('),
(NUMBER, tokval),
(OP, ')')
])
else:
result.append((toknum, tokval))
return untokenize(result)
def enable_automatic_int_sympification(app):
"""
Allow IPython to automatically convert integer literals to Integer.
"""
hasshell = hasattr(app, 'shell')
import ast
if hasshell:
old_run_cell = app.shell.run_cell
else:
old_run_cell = app.run_cell
def my_run_cell(cell, *args, **kwargs):
try:
# Check the cell for syntax errors. This way, the syntax error
# will show the original input, not the transformed input. The
# downside here is that IPython magic like %timeit will not work
# with transformed input (but on the other hand, IPython magic
# that doesn't expect transformed input will continue to work).
ast.parse(cell)
except SyntaxError:
pass
else:
cell = int_to_Integer(cell)
old_run_cell(cell, *args, **kwargs)
if hasshell:
app.shell.run_cell = my_run_cell
else:
app.run_cell = my_run_cell
def enable_automatic_symbols(app):
"""Allow IPython to automatially create symbols (``isympy -a``). """
# XXX: This should perhaps use tokenize, like int_to_Integer() above.
# This would avoid re-executing the code, which can lead to subtle
# issues. For example:
#
# In [1]: a = 1
#
# In [2]: for i in range(10):
# ...: a += 1
# ...:
#
# In [3]: a
# Out[3]: 11
#
# In [4]: a = 1
#
# In [5]: for i in range(10):
# ...: a += 1
# ...: print b
# ...:
# b
# b
# b
# b
# b
# b
# b
# b
# b
# b
#
# In [6]: a
# Out[6]: 12
#
# Note how the for loop is executed again because `b` was not defined, but `a`
# was already incremented once, so the result is that it is incremented
# multiple times.
import re
re_nameerror = re.compile(
"name '(?P<symbol>[A-Za-z_][A-Za-z0-9_]*)' is not defined")
def _handler(self, etype, value, tb, tb_offset=None):
"""Handle :exc:`NameError` exception and allow injection of missing symbols. """
if etype is NameError and tb.tb_next and not tb.tb_next.tb_next:
match = re_nameerror.match(str(value))
if match is not None:
# XXX: Make sure Symbol is in scope. Otherwise you'll get infinite recursion.
self.run_cell("%(symbol)s = Symbol('%(symbol)s')" %
{'symbol': match.group("symbol")}, store_history=False)
try:
code = self.user_ns['In'][-1]
except (KeyError, IndexError):
pass
else:
self.run_cell(code, store_history=False)
return None
finally:
self.run_cell("del %s" % match.group("symbol"),
store_history=False)
stb = self.InteractiveTB.structured_traceback(
etype, value, tb, tb_offset=tb_offset)
self._showtraceback(etype, value, stb)
if hasattr(app, 'shell'):
app.shell.set_custom_exc((NameError,), _handler)
else:
# This was restructured in IPython 0.13
app.set_custom_exc((NameError,), _handler)
def init_ipython_session(argv=[], auto_symbols=False, auto_int_to_Integer=False):
"""Construct new IPython session. """
import IPython
if IPython.__version__ >= '0.11':
# use an app to parse the command line, and init config
# IPython 1.0 deprecates the frontend module, so we import directly
# from the terminal module to prevent a deprecation message from being
# shown.
if IPython.__version__ >= '1.0':
from IPython.terminal import ipapp
else:
from IPython.frontend.terminal import ipapp
app = ipapp.TerminalIPythonApp()
# don't draw IPython banner during initialization:
app.display_banner = False
app.initialize(argv)
if auto_symbols:
readline = import_module("readline")
if readline:
enable_automatic_symbols(app)
if auto_int_to_Integer:
enable_automatic_int_sympification(app)
return app.shell
else:
from IPython.Shell import make_IPython
return make_IPython(argv)
def init_python_session():
"""Construct new Python session. """
from code import InteractiveConsole
class SymPyConsole(InteractiveConsole):
"""An interactive console with readline support. """
def __init__(self):
InteractiveConsole.__init__(self)
try:
import readline
except ImportError:
pass
else:
import os
import atexit
readline.parse_and_bind('tab: complete')
if hasattr(readline, 'read_history_file'):
history = os.path.expanduser('~/.sympy-history')
try:
readline.read_history_file(history)
except IOError:
pass
atexit.register(readline.write_history_file, history)
return SymPyConsole()
def init_session(ipython=None, pretty_print=True, order=None,
use_unicode=None, use_latex=None, quiet=False, auto_symbols=False,
auto_int_to_Integer=False, argv=[]):
"""
Initialize an embedded IPython or Python session. The IPython session is
initiated with the --pylab option, without the numpy imports, so that
matplotlib plotting can be interactive.
Parameters
==========
pretty_print: boolean
If True, use pretty_print to stringify;
if False, use sstrrepr to stringify.
order: string or None
There are a few different settings for this parameter:
        lex (default), which is lexicographic order;
        grlex, which is graded lexicographic order;
        grevlex, which is reversed graded lexicographic order;
old, which is used for compatibility reasons and for long expressions;
None, which sets it to lex.
use_unicode: boolean or None
If True, use unicode characters;
if False, do not use unicode characters.
use_latex: boolean or None
If True, use latex rendering if IPython GUI's;
if False, do not use latex rendering.
quiet: boolean
If True, init_session will not print messages regarding its status;
if False, init_session will print messages regarding its status.
auto_symbols: boolean
If True, IPython will automatically create symbols for you.
If False, it will not.
The default is False.
auto_int_to_Integer: boolean
If True, IPython will automatically wrap int literals with Integer, so
that things like 1/2 give Rational(1, 2).
If False, it will not.
The default is False.
ipython: boolean or None
If True, printing will initialize for an IPython console;
if False, printing will initialize for a normal console;
The default is None, which automatically determines whether we are in
an ipython instance or not.
argv: list of arguments for IPython
See sympy.bin.isympy for options that can be used to initialize IPython.
See Also
========
sympy.interactive.printing.init_printing: for examples and the rest of the parameters.
Examples
========
>>> from sympy import init_session, Symbol, sin, sqrt
>>> sin(x) #doctest: +SKIP
NameError: name 'x' is not defined
>>> init_session() #doctest: +SKIP
>>> sin(x) #doctest: +SKIP
sin(x)
>>> sqrt(5) #doctest: +SKIP
___
\/ 5
>>> init_session(pretty_print=False) #doctest: +SKIP
>>> sqrt(5) #doctest: +SKIP
sqrt(5)
>>> y + x + y**2 + x**2 #doctest: +SKIP
x**2 + x + y**2 + y
>>> init_session(order='grlex') #doctest: +SKIP
>>> y + x + y**2 + x**2 #doctest: +SKIP
x**2 + y**2 + x + y
>>> init_session(order='grevlex') #doctest: +SKIP
>>> y * x**2 + x * y**2 #doctest: +SKIP
x**2*y + x*y**2
>>> init_session(order='old') #doctest: +SKIP
>>> x**2 + y**2 + x + y #doctest: +SKIP
x + y + x**2 + y**2
>>> theta = Symbol('theta') #doctest: +SKIP
>>> theta #doctest: +SKIP
theta
>>> init_session(use_unicode=True) #doctest: +SKIP
>>> theta # doctest: +SKIP
\u03b8
"""
import sys
in_ipython = False
if ipython is not False:
try:
import IPython
except ImportError:
if ipython is True:
raise RuntimeError("IPython is not available on this system")
ip = None
else:
if IPython.__version__ >= '0.11':
try:
ip = get_ipython()
except NameError:
ip = None
else:
ip = IPython.ipapi.get()
if ip:
ip = ip.IP
in_ipython = bool(ip)
if ipython is None:
ipython = in_ipython
if ipython is False:
ip = init_python_session()
mainloop = ip.interact
else:
if ip is None:
ip = init_ipython_session(argv=argv, auto_symbols=auto_symbols,
auto_int_to_Integer=auto_int_to_Integer)
if IPython.__version__ >= '0.11':
# runsource is gone, use run_cell instead, which doesn't
# take a symbol arg. The second arg is `store_history`,
# and False means don't add the line to IPython's history.
ip.runsource = lambda src, symbol='exec': ip.run_cell(src, False)
#Enable interactive plotting using pylab.
try:
ip.enable_pylab(import_all=False)
except Exception:
# Causes an import error if matplotlib is not installed.
# Causes other errors (depending on the backend) if there
# is no display, or if there is some problem in the
# backend, so we have a bare "except Exception" here
pass
if not in_ipython:
mainloop = ip.mainloop
readline = import_module("readline")
if auto_symbols and (not ipython or IPython.__version__ < '0.11' or not readline):
raise RuntimeError("automatic construction of symbols is possible only in IPython 0.11 or above with readline support")
if auto_int_to_Integer and (not ipython or IPython.__version__ < '0.11'):
raise RuntimeError("automatic int to Integer transformation is possible only in IPython 0.11 or above")
_preexec_source = preexec_source
ip.runsource(_preexec_source, symbol='exec')
init_printing(pretty_print=pretty_print, order=order,
use_unicode=use_unicode, use_latex=use_latex, ip=ip)
message = _make_message(ipython, quiet, _preexec_source)
if not in_ipython:
mainloop(message)
sys.exit('Exiting ...')
else:
ip.write(message)
import atexit
atexit.register(lambda ip: ip.write("Exiting ...\n"), ip)
| bsd-3-clause |
MuhammadVT/davitpy | davitpy/pydarn/plotting/musicPlot.py | 1 | 88631 | # -*- coding: utf-8 -*-
# Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""musicPlot module
A module for plotting objects created and processed with the pydarn.proc.music module.
Notes
-----
Please see the pydarn.proc.music module documentation and the iPython notebooks included in the docs
folder of the DaViTPy distribution.
Module author: Nathaniel A. Frissell, Fall 2013
Functions
--------------------------------------------------
daynight_terminator Calculate day/night terminator
plotRelativeRanges cell distances
rangeBeamPlot range versus beam
timeSeriesMultiPlot time series
spectrumMultiPlot 1D line spectral data
multiPlot time series or spectral data
plotFullSpectrum full spectrum of musicArray
plotDlm cross spectral matrix
plotKarr horizontal wave number
plotKarrDetected add in use of detectSignals()
plotKarrAxis Karr plot without titles
--------------------------------------------------
Classes
---------------------------------------
musicFan fan plot of musicArray data
musicRTI RTI plot of musicArray data
---------------------------------------
"""
import numpy as np
import scipy as sp
import datetime
from matplotlib.collections import PolyCollection
from matplotlib.patches import Polygon
from matplotlib import dates as md
import matplotlib
from mpl_toolkits.basemap import Basemap
from davitpy import utils
from davitpy.pydarn.radar.radUtils import getParamDict
from davitpy.pydarn.proc.music import getDataSet
import logging
#Global Figure Size
figsize=(20,10)
def daynight_terminator(date, lons):
"""Calculates the latitude, Greenwich Hour Angle, and solar
declination from a given latitude and longitude.
This routine is used by musicRTI for terminator calculations.
Parameters
----------
date : datetime.datetime
UT date and time of terminator calculation.
lons : np.array
Longitudes of which to calculate the terminator.
Returns
-------
lats : np.array
Latitudes of solar terminator.
tau : np.array
        Greenwich Hour Angle.
dec : np.array
Solar declination.
Notes
-----
Adapted from mpl_toolkits.basemap.solar by Nathaniel A. Frissell, Fall 2013
"""
import mpl_toolkits.basemap.solar as solar
dg2rad = np.pi/180.
# compute greenwich hour angle and solar declination
# from datetime object (assumed UTC).
tau, dec = solar.epem(date)
# compute day/night terminator from hour angle, declination.
longitude = lons + tau
lats = np.arctan(-np.cos(longitude*dg2rad)/np.tan(dec*dg2rad))/dg2rad
return lats,tau,dec
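# Hypothetical usage sketch (not part of the original module): evaluate the
# terminator latitude at a few longitudes for one UT time. A positive solar
# declination corresponds to NH summer, which is how musicRTI interprets it.
def _daynight_terminator_demo():
    lons = np.arange(-180., 181., 60.)
    lats, tau, dec = daynight_terminator(datetime.datetime(2013, 11, 1, 12, 0), lons)
    return lats, tau, dec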
class musicFan(object):
"""Class to plot a fan plot using a pydarn.proc.music.musicArray object as the data source.
Parameters
----------
dataObj : pydarn.proc.music.musicArray
musicArray object
dataSet : Optional[str]
Which dataSet in the musicArray object to plot
time : Optional[None or datetime.datetime]
Time scan plot. If None, the first time in dataSet will be used.
axis : Optional[None or matplotlib.figure.axis]
Matplotlib axis on which to plot. If None, a new figure and axis will be created.
scale : Optional[None or 2-Element iterable]
Colorbar scale. If None, the default scale for the current SuperDARN parameter will be used.
autoScale : Optional[bool]
If True, automatically scale the color bar for good data visualization. Keyword scale must
be None when using autoScale.
plotZeros : Optional[bool]
If True, plot cells that are exactly 0.
markCell : Optional[None or 2-Element iterable]
Mark the (beam, rangeGate) with black.
markBeam : Optional[None or int]
Mark a chosen beam.
markBeam_dict : Optional[dict]
dictionary of keywords defining markBeam line properties.
plotTerminator : Optional[bool]
If True, overlay day/night terminator on map. Uses Basemap's nightshade.
plot_title : Optional[bool]
If True, plot the title information
title : Optional[str]
Overide default title text.
parallels_ticks : Optional[list]
Where to draw the parallel (latitude) lines
meridians_ticks : Optional[list]
Where to draw the meridian (longitude) lines
zoom : Optional[float]
Multiply the map height and width by this factor (bigger number shows more area).
lat_shift : Optional[float]
Add this number to the computed lat_0 sent to basemap.
lon_shift : Optional[float]
Add this number to the computed lon_0 sent to basemap.
cmap_handling : Optional[str]
'superdarn' to use SuperDARN-style colorbars, 'matplotlib' for direct use of matplotlib's colorbars.
'matplotlib' is recommended when using custom scales and the 'superdarn' mode is not providing a desirable result.
    cmap : Optional[None or matplotlib colormap object]
        If None and cmap_handling=='matplotlib', use jet.
plot_cbar : Optional[bool]
If True, plot the color bar.
cbar_ticks : Optional[list]
Where to put the ticks on the color bar.
cbar_shrink : Optional[float]
Fraction by which to shrink the colorbar
cbar_fraction : Optional[float]
Fraction of original axes to use for colorbar
cbar_gstext_offset : Optional[float]
y-offset from colorbar of "Ground Scatter Only" text
cbar_gstext_fontsize : Optional[float]
Fontsize of "Ground Scatter Only" text
model_text_size : Optional[int]
fontsize of model and coordinate indicator text
draw_coastlines : Optional[bool]
If True, draw the coastlines.
basemap_dict : Optional[dict]
Dictionary of keywords sent to the basemap invocation
**kwArgs
Keyword Arguments
Attributes
----------
map_obj
pcoll
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self,dataObject,
dataSet = 'active',
time = None,
axis = None,
scale = None,
autoScale = False,
plotZeros = False,
markCell = None,
markBeam = None,
markBeam_dict = {'color':'white','lw':2},
plotTerminator = True,
parallels_ticks = None,
meridians_ticks = None,
zoom = 1.,
lat_shift = 0.,
lon_shift = 0.,
cmap_handling = 'superdarn',
cmap = None,
plot_cbar = True,
cbar_ticks = None,
cbar_shrink = 1.0,
cbar_fraction = 0.15,
cbar_gstext_offset = -0.075,
cbar_gstext_fontsize = None,
model_text_size = 'small',
draw_coastlines = True,
basemap_dict = {},
plot_title = True,
title = None,
**kwArgs):
if axis is None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=figsize)
from scipy import stats
# Make some variables easier to get to...
currentData = getDataSet(dataObject,dataSet)
metadata = currentData.metadata
latFull = currentData.fov.latFull
lonFull = currentData.fov.lonFull
coords = metadata['coords']
# Translate parameter information from short to long form.
paramDict = getParamDict(metadata['param'])
if paramDict.has_key('label'):
param = paramDict['param']
cbarLabel = paramDict['label']
else:
param = 'width' # Set param = 'width' at this point just to not screw up the colorbar function.
cbarLabel = metadata['param']
# Set colorbar scale if not explicitly defined.
if(scale is None):
if autoScale:
sd = stats.nanstd(np.abs(currentData.data),axis=None)
mean = stats.nanmean(np.abs(currentData.data),axis=None)
scMax = np.ceil(mean + 1.*sd)
if np.min(currentData.data) < 0:
scale = scMax*np.array([-1.,1.])
else:
scale = scMax*np.array([0.,1.])
else:
if paramDict.has_key('range'):
scale = paramDict['range']
else:
scale = [-200,200]
# See if an axis is provided... if not, set one up!
if axis is None:
axis = fig.add_subplot(111)
else:
fig = axis.get_figure()
# Figure out which scan we are going to plot...
if time is None:
timeInx = 0
else:
timeInx = (np.where(currentData.time >= time))[0]
if np.size(timeInx) == 0:
timeInx = -1
else:
timeInx = int(np.min(timeInx))
# do some stuff in map projection coords to get necessary width and height of map
lonFull,latFull = (np.array(lonFull)+360.)%360.,np.array(latFull)
goodLatLon = np.logical_and( np.logical_not(np.isnan(lonFull)), np.logical_not(np.isnan(latFull)) )
goodInx = np.where(goodLatLon)
goodLatFull = latFull[goodInx]
goodLonFull = lonFull[goodInx]
tmpmap = Basemap(projection='npstere', boundinglat=20,lat_0=90, lon_0=np.mean(goodLonFull))
x,y = tmpmap(goodLonFull,goodLatFull)
minx = x.min()
miny = y.min()
maxx = x.max()
maxy = y.max()
width = (maxx-minx)
height = (maxy-miny)
cx = minx + width/2.
cy = miny + height/2.
lon_0,lat_0 = tmpmap(cx, cy, inverse=True)
lon_0 = np.mean(goodLonFull)
dist = width/50.
# Fill the entire subplot area without changing the data aspect ratio.
bbox = axis.get_window_extent()
bbox_width = bbox.width
bbox_height = bbox.height
ax_aspect = bbox_width / bbox_height
map_aspect = width / height
if map_aspect < ax_aspect:
width = (height*bbox_width) / bbox_height
if map_aspect > ax_aspect:
height = (width*bbox_height) / bbox_width
# Zoom!
width = zoom * width
height = zoom * height
lat_0 = lat_0 + lat_shift
lon_0 = lon_0 + lon_shift
bmd = basemap_dict.copy()
width = bmd.pop('width', width)
height = bmd.pop('height', height)
lat_0 = bmd.pop('lat_0', lat_0)
lon_0 = bmd.pop('lon_0', lon_0)
# draw the actual map we want
m = Basemap(projection='stere',width=width,height=height,lon_0=lon_0,lat_0=lat_0,ax=axis,**bmd)
if parallels_ticks is None:
parallels_ticks = np.arange(-80.,81.,10.)
if meridians_ticks is None:
meridians_ticks = np.arange(-180.,181.,20.)
m.drawparallels(parallels_ticks,labels=[1,0,0,0])
m.drawmeridians(meridians_ticks,labels=[0,0,0,1])
if(coords == 'geo') and draw_coastlines == True:
m.drawcoastlines(linewidth=0.5,color='k')
m.drawmapboundary(fill_color='w')
m.fillcontinents(color='w', lake_color='w')
# Plot the SuperDARN data!
ngates = np.shape(currentData.data)[2]
nbeams = np.shape(currentData.data)[1]
verts = []
scan = []
data = currentData.data[timeInx,:,:]
for bm in range(nbeams):
for rg in range(ngates):
if goodLatLon[bm,rg] == False: continue
if np.isnan(data[bm,rg]): continue
if data[bm,rg] == 0 and not plotZeros: continue
scan.append(data[bm,rg])
x1,y1 = m(lonFull[bm+0,rg+0],latFull[bm+0,rg+0])
x2,y2 = m(lonFull[bm+1,rg+0],latFull[bm+1,rg+0])
x3,y3 = m(lonFull[bm+1,rg+1],latFull[bm+1,rg+1])
x4,y4 = m(lonFull[bm+0,rg+1],latFull[bm+0,rg+1])
verts.append(((x1,y1),(x2,y2),(x3,y3),(x4,y4),(x1,y1)))
if (cmap_handling == 'matplotlib') or autoScale:
if cmap is None:
cmap = matplotlib.cm.jet
bounds = np.linspace(scale[0],scale[1],256)
norm = matplotlib.colors.BoundaryNorm(bounds,cmap.N)
elif cmap_handling == 'superdarn':
colors = 'lasse'
cmap,norm,bounds = utils.plotUtils.genCmap(param,scale,colors=colors)
# pcoll = PolyCollection(np.array(verts),edgecolors='face',linewidths=0,closed=False,cmap=cmap,norm=norm,zorder=99)
pcoll = PolyCollection(np.array(verts),edgecolors='face',closed=False,cmap=cmap,norm=norm,zorder=99)
pcoll.set_array(np.array(scan))
axis.add_collection(pcoll,autolim=False)
# Mark Cell
if markCell is not None:
beamInx = int(np.where(currentData.fov.beams == markCell[0])[0])
gateInx = int(np.where(currentData.fov.gates == markCell[1])[0])
x1,y1 = m(lonFull[beamInx+0,gateInx+0],latFull[beamInx+0,gateInx+0])
x2,y2 = m(lonFull[beamInx+1,gateInx+0],latFull[beamInx+1,gateInx+0])
x3,y3 = m(lonFull[beamInx+1,gateInx+1],latFull[beamInx+1,gateInx+1])
x4,y4 = m(lonFull[beamInx+0,gateInx+1],latFull[beamInx+0,gateInx+1])
mkv = np.array([[x1,y1],[x2,y2],[x3,y3],[x4,y4],[x1,y1]])
poly = Polygon(mkv,facecolor='#000000',edgecolor='none',zorder=100)
axis.add_patch(poly)
# Mark Beam
if markBeam is not None:
beamInx = int(np.where(currentData.fov.beams == markBeam)[0])
startedMarking = False
for gateInx in range(ngates):
if goodLatLon[beamInx,gateInx] == False: continue
x1,y1 = m(lonFull[beamInx+0,gateInx+0],latFull[beamInx+0,gateInx+0])
x2,y2 = m(lonFull[beamInx+1,gateInx+0],latFull[beamInx+1,gateInx+0])
x3,y3 = m(lonFull[beamInx+1,gateInx+1],latFull[beamInx+1,gateInx+1])
x4,y4 = m(lonFull[beamInx+0,gateInx+1],latFull[beamInx+0,gateInx+1])
axis.plot([x1,x4],[y1,y4],zorder=150,**markBeam_dict)
axis.plot([x2,x3],[y2,y3],zorder=150,**markBeam_dict)
if not startedMarking:
axis.plot([x1,x2],[y1,y2],zorder=150,**markBeam_dict)
startedMarking = True
if gateInx == ngates-1:
axis.plot([x3,x4],[y3,y4],zorder=150,**markBeam_dict)
dataName = currentData.history[max(currentData.history.keys())] # Label the plot with the current level of data processing.
if plot_title:
if title is None:
axis.set_title(metadata['name']+' - '+dataName+currentData.time[timeInx].strftime('\n%Y %b %d %H%M UT'))
else:
axis.set_title(title)
if plot_cbar:
cbar = fig.colorbar(pcoll,orientation='vertical',shrink=cbar_shrink,fraction=cbar_fraction)
cbar.set_label(cbarLabel)
if cbar_ticks is None:
labels = cbar.ax.get_yticklabels()
labels[-1].set_visible(False)
else:
cbar.set_ticks(cbar_ticks)
if currentData.metadata.has_key('gscat'):
if currentData.metadata['gscat'] == 1:
cbar.ax.text(0.5,cbar_gstext_offset,'Ground\nscat\nonly',ha='center',fontsize=cbar_gstext_fontsize)
txt = 'Coordinates: ' + metadata['coords'] +', Model: ' + metadata['model']
axis.text(1.01, 0, txt,
horizontalalignment='left',
verticalalignment='bottom',
rotation='vertical',
size=model_text_size,
transform=axis.transAxes)
if plotTerminator:
m.nightshade(currentData.time[timeInx])
self.map_obj = m
self.pcoll = pcoll
class musicRTI(object):
"""Class to create an RTI plot using a pydarn.proc.music.musicArray object as the data source.
Parameters
----------
dataObj : pydarn.proc.music.musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to plot
beam : Optional[int]
Beam number to plot.
    xlim : Optional[None or 2-element iterable of datetime.datetime]
Limits for x-axis.
ylim : Optional[None or 2-element iterable of floats]
Limits for y-axis.
axis : Optional[None or matplotlib.figure.axis]
Matplotlib axis on which to plot. If None, a new figure and axis will be created.
scale : Optional[None or 2-Element iterable]
Colorbar scale. If None, the default scale for the current SuperDARN parameter will be used.
plotZeros : Optional[bool]
If True, plot data cells that are identically zero.
max_sounding_time : Optional[None or datetime.timedelta]
Do not allow data to be plotted for longer than this duration.
xBoundaryLimits: Optional[None or 2-element iterable of datetime.datetime]
Mark a region of times on the RTI plot. A green dashed vertical line will be plotted
at each of the boundary times. The region of time outside of the boundary will be shaded gray.
If set to None, this will automatically be set to the timeLimits set in the metadata, if they exist.
yBoundaryLimits : Optional[None or 2-element iterable of floats]
Mark a region of range on the RTI plot. A green dashed horizontal line will be plotted
at each of the boundary ranges. The region of time outside of the boundary will be shaded gray.
If set to None, this will automatically be set to the gateLimits set in the metadata, if they exist.
yticks : Optional[list]
Where to put the ticks on the y-axis.
ytick_lat_format : Optional[str]
%-style string format code for latitude y-tick labels
autoScale : Optional[bool]
If True, automatically scale the color bar for good data visualization. Keyword scale must be None when using autoScale.
plotTerminator : Optional[bool]
If True, overlay day/night terminator on the RTI plot. Every cell is evaluated for day/night and shaded accordingly. Therefore,
terminator resolution will match the resolution of the RTI plot data.
axvlines : Optional[None or list of datetime.datetime]
Dashed vertical lines will be drawn at each specified datetime.datetime.
axvline_color : Optional[str]
Matplotlib color code specifying color of the axvlines.
secondary_coords : Optional[str]
Secondary coordinate system for RTI plot y-axis ('lat' or 'range')
plot_info : Optional[bool]
If True, plot frequency/noise plots
plot_title : Optional[bool]
If True, plot the title information
plot_range_limits_label : Optional[bool]
If True, plot the label corresponding to the range limits on the right-hand y-axis.
cmap_handling : Optional[str]
'superdarn' to use SuperDARN-style colorbars, 'matplotlib' for direct use of matplotlib's colorbars.
'matplotlib' is recommended when using custom scales and the 'superdarn' mode is not providing a desirable result.
plot_cbar : Optional[bool]
If True, plot the color bar.
cbar_ticks : Optional[list]
Where to put the ticks on the color bar.
cbar_shrink : Optional[float]
fraction by which to shrink the colorbar
cbar_fraction : Optional[float]
fraction of original axes to use for colorbar
cbar_gstext_offset : Optional[float]
y-offset from colorbar of "Ground Scatter Only" text
cbar_gstext_fontsize : Optional[float]
fontsize of "Ground Scatter Only" text
model_text_size : Optional[int]
fontsize of model and coordinate indicator text
**kwArgs :
Keyword Arguments
Attributes
----------
cbar_info : dict
    Dictionary of colorbar settings (cmap, bounds, norm, label, ticks, mappable).
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self,dataObject,
dataSet = 'active',
beam = 7,
coords = 'gate',
xlim = None,
ylim = None,
axis = None,
scale = None,
plotZeros = False,
max_sounding_time = datetime.timedelta(minutes=4),
xBoundaryLimits = None,
yBoundaryLimits = None,
yticks = None,
ytick_lat_format = '.0f',
autoScale = False,
plotTerminator = True,
axvlines = None,
axvline_color = '0.25',
secondary_coords = 'lat',
plot_info = True,
plot_title = True,
plot_range_limits_label = True,
cmap_handling = 'superdarn',
cmap = None,
bounds = None,
norm = None,
plot_cbar = True,
cbar_ticks = None,
cbar_shrink = 1.0,
cbar_fraction = 0.15,
cbar_gstext_offset = -0.075,
cbar_gstext_fontsize = None,
model_text_size = 'small',
y_labelpad = None,
**kwArgs):
from scipy import stats
from rti import plotFreq,plotNoise
if axis is None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=figsize)
# Make some variables easier to get to...
currentData = getDataSet(dataObject,dataSet)
metadata = currentData.metadata
latFull = currentData.fov.latFull
lonFull = currentData.fov.lonFull
latCenter = currentData.fov.latCenter
lonCenter = currentData.fov.lonCenter
time = currentData.time
beamInx = np.where(currentData.fov.beams == beam)[0]
radar_lats = latCenter[beamInx,:]
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
# Calculate terminator. ########################################################
if plotTerminator:
daylight = np.ones([nrTimes,nrGates],np.bool)
for tm_inx in range(nrTimes):
tm = time[tm_inx]
term_lons = lonCenter[beamInx,:]
term_lats,tau,dec = daynight_terminator(tm,term_lons)
if dec > 0: # NH Summer
day_inx = np.where(radar_lats < term_lats)[1]
else:
day_inx = np.where(radar_lats > term_lats)[1]
if day_inx.size != 0:
daylight[tm_inx,day_inx] = False
# Translate parameter information from short to long form.
paramDict = getParamDict(metadata['param'])
if paramDict.has_key('label'):
param = paramDict['param']
cbarLabel = paramDict['label']
else:
param = 'width' # Fall back to param = 'width' so the colorbar function still receives a recognized parameter.
cbarLabel = metadata['param']
# Set colorbar scale if not explicitly defined.
if(scale is None):
if autoScale:
sd = stats.nanstd(np.abs(currentData.data),axis=None)
mean = stats.nanmean(np.abs(currentData.data),axis=None)
scMax = np.ceil(mean + 1.*sd)
if np.min(currentData.data) < 0:
scale = scMax*np.array([-1.,1.])
else:
scale = scMax*np.array([0.,1.])
else:
if paramDict.has_key('range'):
scale = paramDict['range']
else:
scale = [-200,200]
# See if an axis is provided... if not, set one up!
if axis is None:
axis = fig.add_subplot(111)
else:
fig = axis.get_figure()
if np.size(beamInx) == 0:
beamInx = 0
beam = currentData.fov.beams[0]
# Plot the SuperDARN data!
verts = []
scan = []
data = np.squeeze(currentData.data[:,beamInx,:])
# The coords keyword needs to be tested better. For now, just allow 'gate' only.
# Even in 'gate' mode, the geographic latitudes are plotted along with gate.
# if coords is None and metadata.has_key('coords'):
# coords = metadata['coords']
#
if coords not in ['gate','range']:
logging.warning('Coords "%s" not supported for RTI plots. Using "gate".' % coords)
coords = 'gate'
if coords == 'gate':
rnge = currentData.fov.gates
elif coords == 'range':
rnge = currentData.fov.slantRFull[beam,:]
xvec = [matplotlib.dates.date2num(x) for x in currentData.time]
for tm in range(nrTimes-1):
for rg in range(nrGates-1):
if np.isnan(data[tm,rg]): continue
if data[tm,rg] == 0 and not plotZeros: continue
if max_sounding_time is not None:
if (currentData.time[tm+1] - currentData.time[tm+0]) > max_sounding_time: continue
scan.append(data[tm,rg])
x1,y1 = xvec[tm+0],rnge[rg+0]
x2,y2 = xvec[tm+1],rnge[rg+0]
x3,y3 = xvec[tm+1],rnge[rg+1]
x4,y4 = xvec[tm+0],rnge[rg+1]
verts.append(((x1,y1),(x2,y2),(x3,y3),(x4,y4),(x1,y1)))
if (cmap_handling == 'matplotlib') or autoScale:
if cmap is None:
cmap = matplotlib.cm.jet
if bounds is None:
bounds = np.linspace(scale[0],scale[1],256)
if norm is None:
norm = matplotlib.colors.BoundaryNorm(bounds,cmap.N)
elif cmap_handling == 'superdarn':
colors = 'lasse'
cmap,norm,bounds = utils.plotUtils.genCmap(param,scale,colors=colors)
pcoll = PolyCollection(np.array(verts),edgecolors='face',linewidths=0,closed=False,cmap=cmap,norm=norm,zorder=99)
pcoll.set_array(np.array(scan))
axis.add_collection(pcoll,autolim=False)
# Plot the terminator! #########################################################
if plotTerminator:
# print 'Terminator functionality is disabled until further testing is completed.'
term_verts = []
term_scan = []
rnge = currentData.fov.gates
xvec = [matplotlib.dates.date2num(x) for x in currentData.time]
for tm in range(nrTimes-1):
for rg in range(nrGates-1):
if daylight[tm,rg]: continue
term_scan.append(1)
x1,y1 = xvec[tm+0],rnge[rg+0]
x2,y2 = xvec[tm+1],rnge[rg+0]
x3,y3 = xvec[tm+1],rnge[rg+1]
x4,y4 = xvec[tm+0],rnge[rg+1]
term_verts.append(((x1,y1),(x2,y2),(x3,y3),(x4,y4),(x1,y1)))
term_pcoll = PolyCollection(np.array(term_verts),facecolors='0.45',linewidth=0,zorder=99,alpha=0.25)
axis.add_collection(term_pcoll,autolim=False)
################################################################################
if axvlines is not None:
for line in axvlines:
axis.axvline(line,color=axvline_color,ls='--')
if xlim is None:
xlim = (np.min(time),np.max(time))
axis.set_xlim(xlim)
axis.xaxis.set_major_formatter(md.DateFormatter('%H:%M'))
axis.set_xlabel('Time [UT]')
if ylim is None:
ylim = (np.min(rnge),np.max(rnge))
axis.set_ylim(ylim)
if yticks is not None:
axis.set_yticks(yticks)
# Y-axis labeling ##############################################################
if coords == 'gate':
if secondary_coords:
if secondary_coords == 'range':
if metadata['model'] == 'IS':
axis.set_ylabel('Range Gate\nSlant Range [km]',labelpad=y_labelpad)
elif metadata['model'] == 'GS':
axis.set_ylabel('Range Gate\nGS Mapped Range [km]',labelpad=y_labelpad)
else:
geo_mag = 'Geographic' if currentData.fov.coords == 'geo' else 'Magnetic'
if metadata['model'] == 'IS':
axis.set_ylabel('Range Gate\n%s Latitude' % geo_mag,labelpad=y_labelpad)
elif metadata['model'] == 'GS':
axis.set_ylabel('Range Gate\nGS Mapped %s Latitude' % geo_mag,labelpad=y_labelpad)
yticks = axis.get_yticks()
ytick_str = []
for tck in yticks:
txt = []
txt.append('%d' % tck)
rg_inx = np.where(tck == currentData.fov.gates)[0]
if np.size(rg_inx) != 0:
if secondary_coords == 'range':
rang = currentData.fov.slantRCenter[beamInx,rg_inx]
if np.isfinite(rang):
txt.append('%d' % rang)
else:
txt.append('')
else:
lat = currentData.fov.latCenter[beamInx,rg_inx]
if np.isfinite(lat):
txt.append((u'%'+ytick_lat_format+'$^o$') % lat)
else:
txt.append('')
txt = '\n'.join(txt)
ytick_str.append(txt)
axis.set_yticklabels(ytick_str,rotation=90,ma='center')
else:
axis.set_ylabel('Range Gate',labelpad=y_labelpad)
elif coords == 'range':
if secondary_coords == 'lat':
# Use linear interpolation to get the latitude associated with a particular range.
# Make sure we only include finite values in the interpolation function.
finite_inx = np.where(np.isfinite(currentData.fov.latCenter[beam,:]))[0]
tmp_ranges = currentData.fov.slantRCenter[beam,:][finite_inx]
tmp_lats = currentData.fov.latCenter[beam,:][finite_inx]
tmp_fn = sp.interpolate.interp1d(tmp_ranges,tmp_lats)
yticks = axis.get_yticks()
ytick_str = []
for tck in yticks:
txt = []
# Append Latitude
try:
lat = tmp_fn(tck)
txt.append((u'%'+ytick_lat_format+'$^o$') % lat)
except:
txt.append('')
# Append Range
txt.append('%d' % tck)
txt = '\n'.join(txt)
ytick_str.append(txt) # Put both lat and range on same string
axis.set_yticklabels(ytick_str,rotation=90,ma='center') # Set yticklabels
# Label y-axis
geo_mag = 'Geographic' if currentData.fov.coords == 'geo' else 'Magnetic'
if metadata['model'] == 'IS':
axis.set_ylabel('%s Latitude\nSlant Range [km]' % geo_mag,labelpad=y_labelpad)
elif metadata['model'] == 'GS':
axis.set_ylabel('GS Mapped %s Latitude\nGS Mapped Range [km]' % geo_mag,labelpad=y_labelpad)
else:
if metadata['model'] == 'IS':
axis.set_ylabel('Slant Range [km]',labelpad=y_labelpad)
elif metadata['model'] == 'GS':
axis.set_ylabel('GS Mapped Range [km]',labelpad=y_labelpad)
axis.set_ylim(ylim)
# Shade xBoundary Limits
if xBoundaryLimits is None:
if currentData.metadata.has_key('timeLimits'):
xBoundaryLimits = currentData.metadata['timeLimits']
if xBoundaryLimits is not None:
gray = '0.75'
# axis.axvspan(xlim[0],xBoundaryLimits[0],color=gray,zorder=150,alpha=0.5)
# axis.axvspan(xBoundaryLimits[1],xlim[1],color=gray,zorder=150,alpha=0.5)
axis.axvspan(xlim[0],xBoundaryLimits[0],color=gray,zorder=1)
axis.axvspan(xBoundaryLimits[1],xlim[1],color=gray,zorder=1)
axis.axvline(x=xBoundaryLimits[0],color='g',ls='--',lw=2,zorder=150)
axis.axvline(x=xBoundaryLimits[1],color='g',ls='--',lw=2,zorder=150)
# Shade yBoundary Limits
if yBoundaryLimits is None:
if currentData.metadata.has_key('gateLimits') and coords == 'gate':
yBoundaryLimits = currentData.metadata['gateLimits']
if currentData.metadata.has_key('rangeLimits') and coords == 'range':
yBoundaryLimits = currentData.metadata['rangeLimits']
if yBoundaryLimits is not None:
gray = '0.75'
# axis.axhspan(ylim[0],yBoundaryLimits[0],color=gray,zorder=150,alpha=0.5)
# axis.axhspan(yBoundaryLimits[1],ylim[1],color=gray,zorder=150,alpha=0.5)
axis.axhspan(ylim[0],yBoundaryLimits[0],color=gray,zorder=1)
axis.axhspan(yBoundaryLimits[1],ylim[1],color=gray,zorder=1)
axis.axhline(y=yBoundaryLimits[0],color='g',ls='--',lw=2,zorder=150)
axis.axhline(y=yBoundaryLimits[1],color='g',ls='--',lw=2,zorder=150)
for bnd_item in yBoundaryLimits:
if coords == 'gate':
txt = []
txt.append('%d' % bnd_item)
rg_inx = np.where(bnd_item == currentData.fov.gates)[0]
if np.size(rg_inx) != 0:
lat = currentData.fov.latCenter[beamInx,rg_inx]
if np.isfinite(lat):
txt.append(u'%.1f$^o$' % lat)
else:
txt.append('')
txt = '\n'.join(txt)
else:
txt = '%.1f' % bnd_item
if plot_range_limits_label:
axis.annotate(txt, (1.01, bnd_item) ,xycoords=('axes fraction','data'),rotation=90,ma='center')
if plot_cbar:
cbar = fig.colorbar(pcoll,orientation='vertical',shrink=cbar_shrink,fraction=cbar_fraction)
cbar.set_label(cbarLabel)
if cbar_ticks is None:
labels = cbar.ax.get_yticklabels()
labels[-1].set_visible(False)
else:
cbar.set_ticks(cbar_ticks)
if currentData.metadata.has_key('gscat'):
if currentData.metadata['gscat'] == 1:
cbar.ax.text(0.5,cbar_gstext_offset,'Ground\nscat\nonly',ha='center',fontsize=cbar_gstext_fontsize)
txt = 'Model: ' + metadata['model']
axis.text(1.01, 0, txt,
horizontalalignment='left',
verticalalignment='bottom',
rotation='vertical',
size=model_text_size,
transform=axis.transAxes)
# Get axis position information.
pos = list(axis.get_position().bounds)
# Plot frequency and noise information. ########################################
if hasattr(dataObject,'prm') and plot_info:
# Adjust current plot position to fit in the freq and noise plots.
super_plot_hgt = 0.06
pos[3] = pos[3] - (2*super_plot_hgt)
axis.set_position(pos)
# Get current colorbar position and adjust it.
cbar_pos = list(cbar.ax.get_position().bounds)
cbar_pos[1] = pos[1]
cbar_pos[3] = pos[3]
cbar.ax.set_position(cbar_pos)
curr_xlim = axis.get_xlim()
curr_xticks = axis.get_xticks()
pos[1] = pos[1] + pos[3]
pos[3] = super_plot_hgt
plotFreq(fig,dataObject.prm.time,dataObject.prm.tfreq,dataObject.prm.nave,pos=pos,xlim=curr_xlim,xticks=curr_xticks)
pos[1] = pos[1] + super_plot_hgt
plotNoise(fig,dataObject.prm.time,dataObject.prm.noisesky,dataObject.prm.noisesearch,pos=pos,xlim=curr_xlim,xticks=curr_xticks)
# Put a title on the RTI Plot. #################################################
if plot_title:
title_y = (pos[1] + pos[3]) + 0.015
xmin = pos[0]
xmax = pos[0] + pos[2]
txt = metadata['name']+' ('+metadata['fType']+')'
fig.text(xmin,title_y,txt,ha='left',weight=550)
txt = []
txt.append(xlim[0].strftime('%Y %b %d %H%M UT - ')+xlim[1].strftime('%Y %b %d %H%M UT'))
txt.append(currentData.history[max(currentData.history.keys())]) # Label the plot with the current level of data processing.
txt = '\n'.join(txt)
fig.text((xmin+xmax)/2.,title_y,txt,weight=550,size='large',ha='center')
txt = 'Beam '+str(beam)
fig.text(xmax,title_y,txt,weight=550,ha='right')
cbar_info = {}
cbar_info['cmap'] = cmap
cbar_info['bounds'] = bounds
cbar_info['norm'] = norm
cbar_info['label'] = cbarLabel
cbar_info['ticks'] = cbar_ticks
cbar_info['mappable'] = pcoll
self.cbar_info = cbar_info
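# ------------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): it assumes `dataObj` is
# a pydarn.proc.music.musicArray built elsewhere from loaded radar data, and the
# figure size is an arbitrary choice.  Kept as comments so nothing is plotted at
# import time; the lines are valid Python if uncommented.
#
#     from matplotlib import pyplot as plt
#     fig = plt.figure(figsize=(20, 10))
#     ax  = fig.add_subplot(111)
#     musicRTI(dataObj, dataSet='active', beam=7, coords='gate', axis=ax)
#     fig.savefig('rti_beam07.png')
# ------------------------------------------------------------------------------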
def plotRelativeRanges(dataObj,dataSet='active',time=None,fig=None):
"""Plots the N-S and E-W distance from the center cell of a field-of-view in a
pydarn.proc.music.musicArray object. Also plots one scan of the chosen
dataSet, with the center cell marked in black.
Parameters
----------
dataObj : pydarn.proc.music.musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to plot
time : Optional[None or datetime.datetime]
Time scan plot. If None, the first time in dataSet will be used.
fig : Optional[None or matplotlib.figure]
matplotlib figure object that will be plotted to. If not provided, one will be created.
Returns
-------
fig : None or matplotlib.figure
matplotlib figure object that was plotted to
Written by Nathaniel A. Frissell, Fall 2013
"""
if fig is None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=figsize)
currentData = getDataSet(dataObj,dataSet)
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
import matplotlib
# Get center of FOV.
ctrBeamInx = currentData.fov.relative_centerInx[0]
ctrGateInx = currentData.fov.relative_centerInx[1]
ctrBeam = currentData.fov.beams[ctrBeamInx]
ctrGate = currentData.fov.gates[ctrGateInx]
ctrLat = currentData.fov.latCenter[ctrBeamInx,ctrGateInx]
ctrLon = currentData.fov.lonCenter[ctrBeamInx,ctrGateInx]
gs = matplotlib.gridspec.GridSpec(3, 2,hspace=None)
axis = fig.add_subplot(gs[0:2, 1])
musicFan(dataObj,time=time,plotZeros=True,dataSet=dataSet,axis=axis,markCell=(ctrBeam,ctrGate))
# Determine the color scale for plotting.
def myround(x, base=50):
return int(base * round(float(x)/base))
absnanmax = np.nanmax(np.abs([currentData.fov.relative_x,currentData.fov.relative_y]))
rnd = myround(absnanmax)
scale = (-rnd, rnd)
# Determine nanmaximum ranges.
xRange = np.nanmax(currentData.fov.relative_x) - np.nanmin(currentData.fov.relative_x)
yRange = np.nanmax(currentData.fov.relative_y) - np.nanmin(currentData.fov.relative_y)
latRange = np.nanmax(currentData.fov.latCenter) - np.nanmin(currentData.fov.latCenter)
lonRange = np.nanmax(currentData.fov.lonCenter) - np.nanmin(currentData.fov.lonCenter)
axis = fig.add_subplot(gs[0:2, 0])
axis.set_axis_off()
text = []
text.append('X-Range [km]: %i' % xRange)
text.append('Y-Range [km]: %i' % yRange)
text.append('Lat Range [deg]: %.1f' % latRange)
text.append('Lon Range [deg]: %.1f' % lonRange)
text.append('Center Lat [deg]: %.1f' % ctrLat)
text.append('Center Lon [deg]: %.1f' % ctrLon)
text = '\n'.join(text)
axis.text(0,0.75,text)
xlabel = 'Beam'
ylabel = 'Gate'
cbarLabel = 'Distance from Center [km]'
axis = fig.add_subplot(gs[2,0])
data = currentData.fov.relative_y
title = 'N-S Distance from Center'
title = '\n'.join([title,'(Beam: %i, Gate: %i)' % (ctrBeam, ctrGate)])
rangeBeamPlot(currentData,data,axis,title=title,xlabel=xlabel,ylabel=ylabel,scale=scale,cbarLabel=cbarLabel)
axis = fig.add_subplot(gs[2,1])
data = currentData.fov.relative_x
title = 'E-W Distance from Center'
title = '\n'.join([title,'(Beam: %i, Gate: %i)' % (ctrBeam, ctrGate)])
rangeBeamPlot(currentData,data,axis,title=title,xlabel=xlabel,ylabel=ylabel,scale=scale,cbarLabel=cbarLabel)
return fig
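# Hedged usage sketch (not part of the original module): assumes `dataObj` is a
# pydarn.proc.music.musicArray with a populated field of view.  Passing a figure
# explicitly avoids relying on the module-level figsize default.
#
#     from matplotlib import pyplot as plt
#     fig = plt.figure(figsize=(20, 10))
#     plotRelativeRanges(dataObj, dataSet='active', fig=fig)
#     fig.savefig('relative_ranges.png')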
def rangeBeamPlot(currentData,data,axis,title=None,xlabel=None,ylabel=None,param='velocity',scale=None,cbarLabel=None):
"""Plots data on a range versus beam plot with a colorbar.
Parameters
----------
currentData : pydarn.proc.music.musicDataObj
musicDataObj
data : numpy.array
nBeams x nGates Numpy array of data
axis : matplotlib.axis
matplotlib axis object on which to plot
title : Optional[None or str]
Title of plot.
xlabel : Optional[None or str]
X-axis label
ylabel : Optional[None or str]
Y-axis label
param : Optional[None or str]
Parameter used for colorbar selection.
scale : Optional[None or 2-element iterable]
Two-element colorbar scale.
cbarLabel : Optional[str]
Colorbar label.
Written by Nathaniel A. Frissell, Fall 2013
"""
fig = axis.get_figure()
ngates = len(currentData.fov.gates)
nbeams = len(currentData.fov.beams)
verts = []
scan = []
for bmInx in range(nbeams):
for rgInx in range(ngates):
scan.append(data[bmInx,rgInx])
bm = currentData.fov.beams[bmInx]
rg = currentData.fov.gates[rgInx]
x1,y1 = bm+0, rg+0
x2,y2 = bm+1, rg+0
x3,y3 = bm+1, rg+1
x4,y4 = bm+0, rg+1
verts.append(((x1,y1),(x2,y2),(x3,y3),(x4,y4),(x1,y1)))
if scale is None:
scale = (np.min(scan),np.max(scan))
cmap = matplotlib.cm.jet
bounds = np.linspace(scale[0],scale[1],256)
norm = matplotlib.colors.BoundaryNorm(bounds,cmap.N)
pcoll = PolyCollection(np.array(verts),edgecolors='face',linewidths=0,closed=False,cmap=cmap,norm=norm,zorder=99)
pcoll.set_array(np.array(scan))
axis.add_collection(pcoll,autolim=False)
axis.set_xlim(min(currentData.fov.beams), max(currentData.fov.beams)+1)
axis.set_ylim(min(currentData.fov.gates), max(currentData.fov.gates)+1)
if title is not None: axis.set_title(title)
if xlabel is not None: axis.set_xlabel(xlabel)
if ylabel is not None: axis.set_ylabel(ylabel)
cbar = fig.colorbar(pcoll,orientation='vertical')#,shrink=.65,fraction=.1)
if cbarLabel is not None: cbar.set_label(cbarLabel)
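# Hedged, self-contained sketch (not part of the original module): because
# rangeBeamPlot() only reads currentData.fov.beams and currentData.fov.gates,
# it can be exercised with a small synthetic stand-in object.  All names below
# are invented for the demo; the module's own matplotlib imports are assumed.
#
#     import numpy as np
#     from matplotlib import pyplot as plt
#
#     class _Stub(object):
#         pass
#
#     stub           = _Stub()
#     stub.fov       = _Stub()
#     stub.fov.beams = np.arange(16)
#     stub.fov.gates = np.arange(75)
#     demo_data      = 100. * np.random.randn(16, 75)
#
#     fig = plt.figure()
#     ax  = fig.add_subplot(111)
#     rangeBeamPlot(stub, demo_data, ax, title='Synthetic demo',
#                   xlabel='Beam', ylabel='Gate', cbarLabel='Arbitrary units')
#     fig.savefig('rangeBeamPlot_demo.png')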
def timeSeriesMultiPlot(dataObj,dataSet='active',dataObj2=None,dataSet2=None,plotBeam=None,plotGate=None,fig=None,xlim=None,ylim=None,xlabel=None,ylabel=None,title=None,xBoundaryLimits=None):
"""Plots 1D line time series of selected cells in a pydarn.proc.music.musicArray object.
This defaults to 9 cells of the FOV.
Parameters
----------
dataObj : pydarn.proc.music.musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to plot
dataObj2 : Optional[pydarn.proc.music.musicArray]
A second musicArray object to be overlain on the the first dataObj plot.
dataSet2 : Optional[str]
which dataSet in the second musicArray to plot
plotBeam : Optional[list of int]
list of beams to plot from
plotGate : Optional[list of int]
list of range gates to plot from
fig : Optional[matplotlib.figure]
matplotlib figure object that will be plotted to. If not provided, one will be created.
xlim : Optional[None or 2-element iterable]
X-axis limits of all plots
ylim : Optional[None or 2-element iterable]
Y-axis limits of all plots
xlabel : Optional[None or str]
X-axis label
ylabel : Optional[None or str]
Y-axis label
title : Optional[None or str]
Title of plot
xBoundaryLimits : Optional[None or 2-element iterable]
Element sequence to shade out portions of the data. Data outside of this range will be shaded gray;
data inside of the range will have a white background. If set to None, this will automatically be set to the timeLimits set
in the metadata, if they exist.
Returns
-------
fig : matplotlib.figure
matplotlib figure object that was plotted to
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
xData1 = currentData.time
yData1 = currentData.data
beams = currentData.fov.beams
gates = currentData.fov.gates
if dataObj2 is not None and dataSet2 is None: dataSet2 = 'active'
if dataSet2 is not None:
if dataObj2 is not None:
currentData2 = getDataSet(dataObj2,dataSet2)
else:
currentData2 = getDataSet(dataObj,dataSet2)
xData2 = currentData2.time
yData2 = currentData2.data
yData2_title = currentData2.history[max(currentData2.history.keys())]
else:
xData2 = None
yData2 = None
yData2_title = None
# Define x-axis range
if xlim is None:
tmpLim = []
tmpLim.append(min(xData1))
tmpLim.append(max(xData1))
if xData2 is not None:
tmpLim.append(min(xData2))
tmpLim.append(max(xData2))
xlim = (min(tmpLim),max(tmpLim))
# Set x boundary limits using timeLimits, if they exist. Account for both dataSet1 and dataSet2, and write it so timeLimits can be any type of sequence.
if xBoundaryLimits is None:
tmpLim = []
if currentData.metadata.has_key('timeLimits'):
tmpLim.append(currentData.metadata['timeLimits'][0])
tmpLim.append(currentData.metadata['timeLimits'][1])
if dataSet2 is not None:
if currentData2.metadata.has_key('timeLimits'):
tmpLim.append(currentData2.metadata['timeLimits'][0])
tmpLim.append(currentData2.metadata['timeLimits'][1])
if tmpLim != []:
xBoundaryLimits = (min(tmpLim), max(tmpLim))
# Get X-Axis title.
if xlabel is None:
xlabel = 'UT'
# Get Y-Axis title.
paramDict = getParamDict(currentData.metadata['param'])
if ylabel is None and paramDict.has_key('label'):
ylabel = paramDict['label']
yData1_title = currentData.history[max(currentData.history.keys())] # Label the plot with the current level of data processing
if title is None:
title = []
title.append('Selected Cells: '+yData1_title)
title.append(currentData.metadata['code'][0].upper() + ': ' +
xlim[0].strftime('%Y %b %d %H:%M - ') + xlim[1].strftime('%Y %b %d %H:%M'))
title = '\n'.join(title)
return multiPlot(xData1,yData1,beams,gates,yData1_title=yData1_title,fig=fig,xlim=xlim,ylim=ylim,xlabel=xlabel,ylabel=ylabel,title=title,
xData2=xData2,yData2=yData2,yData2_title=yData2_title,xBoundaryLimits=xBoundaryLimits)
def spectrumMultiPlot(dataObj,dataSet='active',plotType='real_imag',plotBeam=None,plotGate=None,fig=None,xlim=None,ylim=None,xlabel=None,ylabel=None,title=None,xBoundaryLimits=None):
"""Plots 1D line spectral plots of selected cells in a pydarn.proc.music.musicArray object.
This defaults to 9 cells of the FOV.
Parameters
----------
dataObj : pydarn.proc.music.musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to plot
plotType : Optional[str]
{'real_imag'|'magnitude'|'phase'}
plotBeam : Optional[list of int]
list of beams to plot from
plotGate : Optional[list of int]
list of range gates to plot from
fig : Optional[matplotlib.figure]
matplotlib figure object that will be plotted to. If not provided, one will be created.
xlim : Optional[None or 2-element iterable]
X-axis limits of all plots
ylim : Optional[None or 2-element iterable]
Y-axis limits of all plots
xlabel : Optional[None or str]
X-axis label
ylabel : Optional[None or str]
Y-axis label
title : Optional[None or str]
Title of plot
xBoundaryLimits : Optional[None or 2-element iterable]
Element sequence to shade out portions of the data. Data outside of this range will be shaded gray;
data inside of the range will have a white background. If set to None, this will automatically be set to the timeLimits set
in the metadata, if they exist.
Returns
-------
fig : matplotlib.figure
matplotlib figure object that was plotted to
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getattr(dataObj,dataSet)
if plotType == 'magnitude':
xData1 = currentData.freqVec
yData1 = np.abs(currentData.spectrum)
yData1_title = 'Magnitude'
ylabel = 'Magnitude'
xData2 = None
yData2 = None
yData2_title = None
if xlim is None:
xlim = (0,np.max(xData1))
if ylim is None:
ylim = (0,np.max(yData1))
elif plotType == 'phase':
xData1 = currentData.freqVec
yData1 = np.angle(currentData.spectrum)
yData1_title = 'Phase'
ylabel = 'Phase [rad]'
xData2 = None
yData2 = None
yData2_title = None
if xlim is None:
xlim = (0,np.max(xData1))
else:
xData1 = currentData.freqVec
yData1 = np.real(currentData.spectrum)
yData1_title = 'Real Part'
ylabel = 'Amplitude'
xData2 = currentData.freqVec
yData2 = np.imag(currentData.spectrum)
yData2_title = 'Imaginary Part'
if xlim is None:
xlim = (np.min(xData1),np.max(xData1))
beams = currentData.fov.beams
gates = currentData.fov.gates
# Get the time limits.
timeLim = (np.min(currentData.time),np.max(currentData.time))
# Get X-Axis title.
if xlabel is None:
xlabel = 'Frequency [Hz]'
if title is None:
title = []
title.append('Selected Cells: '+currentData.history[max(currentData.history.keys())]) # Label the plot with the current level of data processing.
title.append(currentData.metadata['code'][0].upper() + ': ' +
timeLim[0].strftime('%Y %b %d %H:%M - ') + timeLim[1].strftime('%Y %b %d %H:%M'))
title = '\n'.join(title)
fig = multiPlot(xData1,yData1,beams,gates,yData1_title=yData1_title,fig=fig,xlim=xlim,ylim=ylim,xlabel=xlabel,ylabel=ylabel,title=title,
xData2=xData2,yData2=yData2,yData2_title=yData2_title,xBoundaryLimits=xBoundaryLimits)
return fig
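# Hedged usage sketch (not part of the original module): both wrappers assume
# `dataObj` is a pydarn.proc.music.musicArray, and spectrumMultiPlot() further
# assumes the spectrum has already been computed (see the reference to
# pydarn.proc.music.calculateFFT() elsewhere in this module).
#
#     timeSeriesMultiPlot(dataObj, dataSet='active')
#     spectrumMultiPlot(dataObj, dataSet='active', plotType='magnitude')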
def multiPlot(xData1,yData1,beams,gates,yData1_title=None,plotBeam=None,plotGate=None,fig=None,xlim=None,ylim=None,xlabel=None,ylabel=None,title=None,
xData2=None,yData2=None,yData2_title=None,xBoundaryLimits=None):
"""Plots 1D time series or line spectral plots of selected cells in a 3d-array. Two data sets can be plotted simultaneously for comparison.
This defaults to 9 cells of the 3d-array.
Parameters
----------
xData1 : 1d list or numpy.array
x-axis values
yData1 : 3d numpy.array
Data to plot. First axis should correspond to xData1.
beams : Optional[list]
list identifying the beams present in the second axis of yData1.
gates : Optional[list]
list identifying the gates present in the third axis of yData1.
yData1_title : Optional[str]
Name of yData1 data.
plotBeam : Optional[list of int]
list of beams to plot from (corresponds to yData1 second axis)
plotGate : Optional[list of int]
list of range gates to plot from (corresponds to yData1 third axis)
fig : Optional[matplotlib.figure]
matplotlib figure object that will be plotted to. If not provided, one will be created.
xlim : Optional[None or 2-element iterable]
X-axis limits of all plots
ylim : Optional[None or 2-element iterable]
Y-axis limits of all plots
xlabel : Optional[None or str]
X-axis label
ylabel : Optional[None or str]
Y-axis label
title : Optional[None or str]
Title of plot
xData2 : Optional[1d list or numpy.array]
x-axis values of second data set
yData2 : Optional[3d numpy.array]
Second data set to plot. First axis should correspond to xData2.
yData2_title : Optional[str]
Name of yData2 data.
xBoundaryLimits : Optional[None or 2-element iterable]
Element sequence to shade out portions of the data. Data outside of this range will be shaded gray;
data inside of the range will have a white background. If set to None, this will automatically be set to the timeLimits set
in the metadata, if they exist.
Returns
-------
fig : matplotlib.figure
matplotlib figure object that was plotted to
Written by Nathaniel A. Frissell, Fall 2013
"""
if fig is None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=figsize)
from matplotlib import dates as md
# Calculate three default beams and gates to plot.
if plotBeam is None:
beamMin = min(beams)
beamMed = int(np.median(beams))
beamMax = max(beams)
plotBeam = np.array([beamMin,beamMed,beamMax])
if plotGate is None:
gateMin = min(gates)
gateMed = int(np.median(gates))
gateMax = max(gates)
plotGate = np.array([gateMin,gateMed,gateMax])
# Put things in the correct order. Gates need to be backwards.
plotBeam.sort()
plotGate.sort()
plotGate = plotGate[::-1] # Reverse the order.
# Determine the indices of the beams and gates.
plotBeamInx = []
for item in plotBeam:
plotBeamInx.append(int(np.where(beams == item)[0]))
plotGateInx = []
for item in plotGate:
plotGateInx.append(int(np.where(gates == item)[0]))
plotBeamInx = np.array(plotBeamInx)
plotGateInx = np.array(plotGateInx)
nCols = len(plotBeam)
nRows = len(plotGate)
# Define x-axis range
if xlim is None:
tmpLim = []
tmpLim.append(min(xData1))
tmpLim.append(max(xData1))
if xData2 is not None:
tmpLim.append(min(xData2))
tmpLim.append(max(xData2))
xlim = (min(tmpLim),max(tmpLim))
# Autorange y-axis... make all plots have the same range.
data = []
if ylim is None:
for rg,rgInx in zip(plotGate,plotGateInx):
for bm,bmInx in zip(plotBeam,plotBeamInx):
for item in yData1[:,bmInx,rgInx]:
data.append(item)
if yData2 is not None:
for item in yData2[:,bmInx,rgInx]:
data.append(item)
mx = np.nanmax(data)
mn = np.nanmin(data)
if np.logical_and(mx > 0,mn >= -0.001):
ylim = (0,mx)
elif np.logical_and(mn < 0, mx <= 0.001):
ylim = (mn,0)
elif abs(mx) >= abs(mn):
ylim = (-mx,mx)
elif abs(mn) > abs(mx):
ylim = (-abs(mn),abs(mn))
ii = 1
for rg,rgInx in zip(plotGate,plotGateInx):
for bm,bmInx in zip(plotBeam,plotBeamInx):
axis = fig.add_subplot(nCols,nRows,ii)
l1, = axis.plot(xData1,yData1[:,bmInx,rgInx],label=yData1_title)
if yData2 is not None:
l2, = axis.plot(xData2,yData2[:,bmInx,rgInx],label=yData2_title)
# Set axis limits.
axis.set_xlim(xlim)
axis.set_ylim(ylim)
# Special handling for time axes.
if xlabel == 'UT':
axis.xaxis.set_major_formatter(md.DateFormatter('%H:%M'))
labels = axis.get_xticklabels()
for label in labels:
label.set_rotation(30)
# Gray out area outside of the boundary.
if xBoundaryLimits is not None:
gray = '0.75'
axis.axvspan(xlim[0],xBoundaryLimits[0],color=gray)
axis.axvspan(xBoundaryLimits[1],xlim[1],color=gray)
axis.axvline(x=xBoundaryLimits[0],color='g',ls='--',lw=2)
axis.axvline(x=xBoundaryLimits[1],color='g',ls='--',lw=2)
text = 'Beam: %i, Gate: %i' % (bm, rg)
axis.text(0.02,0.92,text,transform=axis.transAxes)
# Only the first column gets labels.
if ii % nCols == 1:
axis.set_ylabel(ylabel)
# Only have the last row have time ticks
if ii <= (nRows-1)*nCols:
axis.xaxis.set_visible(False)
else:
axis.set_xlabel(xlabel)
ii = ii+1
if yData1_title is not None and yData2_title is not None:
fig.legend((l1,l2),(yData1_title,yData2_title),loc=(0.55,0.92))
if title is not None:
fig.text(0.12,0.92,title,size=24)
return fig
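# Hedged, self-contained sketch (not part of the original module): multiPlot()
# operates on plain arrays, so it can be exercised with synthetic data.  All
# names below are invented for the demo; passing `fig` explicitly avoids the
# module-level figsize default.
#
#     import numpy as np
#     from matplotlib import pyplot as plt
#
#     beams = np.arange(16)
#     gates = np.arange(10, 40)
#     xvals = np.linspace(0., 1., 120)
#     yvals = np.random.randn(120, beams.size, gates.size)
#
#     fig = plt.figure(figsize=(20, 10))
#     multiPlot(xvals, yvals, beams, gates, yData1_title='Synthetic',
#               fig=fig, xlabel='Normalized time', ylabel='Amplitude')
#     fig.savefig('multiPlot_demo.png')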
def plotFullSpectrum(dataObj,dataSet='active',
fig = None,
axis = None,
xlim = None,
normalize = False,
scale = None,
plot_title = True,
maxXTicks = 10.,
plot_cbar = True,
cbar_label = 'ABS(Spectral Density)',
cbar_ticks = None,
cbar_shrink = 1.0,
cbar_fraction = 0.15,
cbar_pad = 0.05,
cbar_gstext_offset = -0.075,
cbar_gstext_fontsize = None,
cbar_gstext_enable = True,
**kwArgs):
"""Plot full spectrum of a pydarn.proc.music.musicArray object. The spectrum must have already been calculated with
pydarn.proc.music.calculateFFT().
In this plot, major divisions on the x-axis are FFT bins. Every bin contains one slice representing each beam of the given radar
data, from left to right. The y-axis shows the range gates of the data object. The color bar at the top of the plot shows which
FFT bin contains the most power when integrating over the entire bin.
Parameters
----------
dataObj : pydarn.proc.music.musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to plot
fig : Optional[matplotlib.figure]
matplotlib figure object that will be plotted to. If not provided, one will be created.
axis : Optional[ ]
Matplotlib axis object to plot on.
xlim : Optional[None or 2-element iterable]
X-axis limits in Hz
plot_title : Optional[bool]
If True, plot the title information
maxXTicks : Optional[int]
Maximum number of xtick labels.
cbar_label : Optional[str]
Text for color bar label
cbar_ticks : Optional[list]
Where to put the ticks on the color bar.
cbar_shrink : Optional[float]
fraction by which to shrink the colorbar
cbar_fraction : Optional[float]
fraction of original axes to use for colorbar
cbar_gstext_offset : Optional[float]
y-offset from colorbar of "Ground Scatter Only" text
cbar_gstext_fontsize : Optional[float]
fontsize of "Ground Scatter Only" text
cbar_gstext_enable : Optional[bool]
Enable "Ground Scatter Only" text
**kwArgs :
Keyword Arguments
Returns
-------
return_dict : dict
    Dictionary containing the spectrum PolyCollection ('cbar_pcoll') and the colorbar label ('cbar_label').
Written by Nathaniel A. Frissell, Fall 2013
"""
from scipy import stats
return_dict = {}
currentData = getDataSet(dataObj,dataSet)
nrFreqs,nrBeams,nrGates = np.shape(currentData.spectrum)
if xlim is None:
posFreqInx = np.where(currentData.freqVec >= 0)[0]
else:
posFreqInx = np.where(np.logical_and(currentData.freqVec >= xlim[0],currentData.freqVec <= xlim[1]))[0]
posFreqVec = currentData.freqVec[posFreqInx]
npf = len(posFreqVec) # Number of positive frequencies
data = np.abs(currentData.spectrum[posFreqInx,:,:]) # Use the magnitude of the positive frequency data.
if normalize:
data = data / data.max()
# Determine scale for colorbar.
sd = stats.nanstd(data,axis=None)
mean = stats.nanmean(data,axis=None)
scMax = mean + 2.*sd
if scale is None:
scale = scMax*np.array([0,1.])
nXBins = nrBeams * npf # number of bins we are going to plot
# Average Power Spectral Density
avg_psd = np.zeros(npf)
for x in range(npf): avg_psd[x] = np.mean(data[x,:,:])
# Do plotting here!
if fig is None and axis is None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=figsize)
elif axis is not None:
fig = axis.get_figure()
if axis is None:
axis = fig.add_subplot(111)
verts = []
scan = []
# Plot Spectrum
sep = 0.1
for ff in range(npf):
for bb in range(nrBeams):
xx0 = nrBeams*(ff + 0.5*sep) + bb*(1-sep)
xx1 = xx0 + (1-sep)
for gg in range(nrGates):
scan.append(data[ff,bb,gg])
yy0 = gg
yy1 = gg + 1
x1,y1 = xx0, yy0
x2,y2 = xx1, yy0
x3,y3 = xx1, yy1
x4,y4 = xx0, yy1
verts.append(((x1,y1),(x2,y2),(x3,y3),(x4,y4),(x1,y1)))
param = 'power'
cmap = matplotlib.cm.Blues_r
bounds = np.linspace(scale[0],scale[1],256)
norm = matplotlib.colors.BoundaryNorm(bounds,cmap.N)
pcoll = PolyCollection(np.array(verts),edgecolors='face',linewidths=0,closed=False,cmap=cmap,norm=norm,zorder=99)
pcoll.set_array(np.array(scan))
axis.add_collection(pcoll,autolim=False)
spect_pcoll = pcoll
# Colorbar
if plot_cbar:
cbar = fig.colorbar(pcoll,orientation='vertical',shrink=cbar_shrink,fraction=cbar_fraction,pad=cbar_pad)
cbar.set_label(cbar_label)
if cbar_ticks is None:
labels = cbar.ax.get_yticklabels()
labels[-1].set_visible(False)
else:
cbar.set_ticks(cbar_ticks)
if currentData.metadata.has_key('gscat') and cbar_gstext_enable:
if currentData.metadata['gscat'] == 1:
cbar.ax.text(0.5,cbar_gstext_offset,'Ground\nscat\nonly',ha='center',fontsize=cbar_gstext_fontsize)
# Plot average values.
verts = []
scan = []
yy0 = nrGates
yy1 = nrGates + 1
for ff in range(npf):
scan.append(avg_psd[ff])
xx0 = nrBeams*(ff + 0.5*sep)
xx1 = xx0 + nrBeams*(1-sep)
x1,y1 = xx0, yy0
x2,y2 = xx1, yy0
x3,y3 = xx1, yy1
x4,y4 = xx0, yy1
verts.append(((x1,y1),(x2,y2),(x3,y3),(x4,y4),(x1,y1)))
param = 'power'
cmap = matplotlib.cm.winter
norm = matplotlib.colors.Normalize(vmin = 0, vmax = np.max(avg_psd))
pcoll = PolyCollection(np.array(verts),edgecolors='face',linewidths=0,closed=False,cmap=cmap,norm=norm,zorder=99)
pcoll.set_array(np.array(scan))
axis.add_collection(pcoll,autolim=False)
# Mark maximum PSD column.
maxInx = np.argmax(avg_psd)
xx0 = nrBeams*(maxInx + 0.5*sep)
xx1 = xx0 + nrBeams*(1-sep)
x1,y1 = xx0, yy0
x2,y2 = xx1, yy0
x3,y3 = xx1, yy1
x4,y4 = xx0, yy1
mkv = np.array([[x1,y1],[x2,y2],[x3,y3],[x4,y4],[x1,y1]])
poly = Polygon(mkv,facecolor='Red',edgecolor='none',zorder=100)
axis.add_patch(poly)
# X-Labels
modX = np.ceil(npf / np.float(maxXTicks))
xlabels = []
xpos = []
for ff in range(npf-1):
if (ff % modX) != 0: continue
freqLabel = '%.2f' % (posFreqVec[ff]*1000.)
if posFreqVec[ff] == 0:
periodLabel = 'Inf'
else:
periodLabel = '%.0f' % (1./posFreqVec[ff] / 60.)
xlabels.append(freqLabel+'\n'+periodLabel)
xpos.append(nrBeams* (ff + 0.1))
xlabels.append('freq [mHz]\nPer. [min]')
xpos.append(nrBeams* (npf-1 + 0.1))
axis.set_xticks(xpos)
axis.set_xticklabels(xlabels,ha='left')
# Y-Labels
maxYTicks = 10.
modY = np.ceil(nrGates/maxYTicks)
ylabels = []
ypos = []
for gg in range(nrGates):
if (gg % modY) != 0: continue
ylabels.append('%i' % currentData.fov.gates[gg])
ypos.append(gg+0.5)
ylabels.append('$\Sigma$PSD')
ypos.append(nrGates+0.5)
axis.set_yticks(ypos)
axis.set_yticklabels(ylabels)
axis.set_ylabel('Range Gate')
for ff in range(npf):
axis.axvline(x=ff*nrBeams,color='k',lw=2)
# axis.set_xlim([0,nXBins])
axis.set_ylim([0,nrGates+1])
if plot_title:
xpos = 0.130
fig.text(xpos,0.99,'Full Spectrum View',fontsize=20,va='top')
# Get the time limits.
timeLim = (np.min(currentData.time),np.max(currentData.time))
md = currentData.metadata
# Translate parameter information from short to long form.
paramDict = getParamDict(md['param'])
param = paramDict['param']
# cbarLabel = paramDict['label']
text = md['name'] + ' ' + param.capitalize() + timeLim[0].strftime(' (%Y %b %d %H:%M - ') + timeLim[1].strftime('%Y %b %d %H:%M)')
if md.has_key('fir_filter'):
filt = md['fir_filter']
if filt[0] is None:
low = 'None'
else:
low = '%.2f' % (1000. * filt[0])
if filt[1] is None:
high = 'None'
else:
high = '%.2f' % (1000. * filt[1])
text = text + '\n' + 'Digital Filter: [' + low + ', ' + high + '] mHz'
fig.text(xpos,0.95,text,fontsize=14,va='top')
return_dict['cbar_pcoll'] = spect_pcoll
return_dict['cbar_label'] = cbar_label
return return_dict
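# Hedged usage sketch (not part of the original module): with plot_cbar=False,
# the returned dictionary lets the caller draw the colorbar on axes of their
# choosing.  Assumes `dataObj` already carries a computed spectrum.
#
#     from matplotlib import pyplot as plt
#     fig  = plt.figure(figsize=(20, 10))
#     ax   = fig.add_subplot(111)
#     rd   = plotFullSpectrum(dataObj, axis=ax, plot_cbar=False)
#     cbar = fig.colorbar(rd['cbar_pcoll'], ax=ax)
#     cbar.set_label(rd['cbar_label'])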
def plotDlm(dataObj,dataSet='active',fig=None):
"""Plot the cross spectral matrix of a pydarn.proc.music.musicArray object. The cross-spectral matrix must have already
been calculated for the chosen data set using pydarn.proc.music.calculateDlm().
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to plot
fig : Optional[matplotlib.figure]
matplotlib figure object that will be plotted to. If not provided, one will be created.
Written by Nathaniel A. Frissell, Fall 2013
"""
if fig is None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=figsize)
import copy
from scipy import stats
currentData = getDataSet(dataObj,dataSet)
data = np.abs(currentData.Dlm)
# Determine scale for colorbar.
sd = stats.nanstd(data,axis=None)
mean = stats.nanmean(data,axis=None)
scMax = mean + 4.*sd
scale = scMax*np.array([0,1.])
# Do plotting here!
axis = fig.add_subplot(111)
nrL, nrM = np.shape(data)
verts = []
scan = []
# Plot Spectrum
for ll in range(nrL):
xx0 = ll
xx1 = ll+1
for mm in range(nrM):
scan.append(data[ll,mm])
yy0 = mm
yy1 = mm + 1
x1,y1 = xx0, yy0
x2,y2 = xx1, yy0
x3,y3 = xx1, yy1
x4,y4 = xx0, yy1
verts.append(((x1,y1),(x2,y2),(x3,y3),(x4,y4),(x1,y1)))
colors = 'lasse'
if scale is None:
scale = (np.min(scan),np.max(scan))
cmap = matplotlib.cm.jet
bounds = np.linspace(scale[0],scale[1],256)
norm = matplotlib.colors.BoundaryNorm(bounds,cmap.N)
pcoll = PolyCollection(np.array(verts),edgecolors='face',linewidths=0,closed=False,cmap=cmap,norm=norm,zorder=99)
pcoll.set_array(np.array(scan))
axis.add_collection(pcoll,autolim=False)
# Colorbar
cbar = fig.colorbar(pcoll,orientation='vertical')#,shrink=.65,fraction=.1)
cbar.set_label('ABS(Spectral Density)')
if currentData.metadata.has_key('gscat'):
if currentData.metadata['gscat'] == 1:
cbar.ax.text(0.5,-0.075,'Ground\nscat\nonly',ha='center')
# labels[-1].set_visible(False)
axis.set_xlim([0,nrL])
axis.set_ylim([0,nrM])
axis.set_xlabel('l')
axis.set_ylabel('m')
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
ticks = []
labels = []
mod = int(np.floor(nrGates / 10))
for x in xrange(nrGates):
if x % mod != 0: continue
ll = nrBeams*x
ticks.append(ll)
txt = '%i\n%i' % (ll, currentData.fov.gates[x])
labels.append(txt)
ticks.append(nrL)
xlabels = copy.copy(labels)
xlabels.append('l\ngate')
axis.set_xticks(ticks)
axis.set_xticklabels(xlabels,ha='left')
ylabels = copy.copy(labels)
ylabels.append('m\ngate')
axis.set_yticks(ticks)
axis.set_yticklabels(ylabels)
xpos = 0.130
fig.text(xpos,0.99,'ABS(Cross Spectral Density Matrix Dlm)',fontsize=20,va='top')
# Get the time limits.
timeLim = (np.min(currentData.time),np.max(currentData.time))
md = currentData.metadata
# Translate parameter information from short to long form.
paramDict = getParamDict(md['param'])
param = paramDict['param']
cbarLabel = paramDict['label']
text = md['name'] + ' ' + param.capitalize() + timeLim[0].strftime(' (%Y %b %d %H:%M - ') + timeLim[1].strftime('%Y %b %d %H:%M)')
if md.has_key('fir_filter'):
filt = md['fir_filter']
if filt[0] is None:
low = 'None'
else:
low = '%.2f' % (1000. * filt[0])
if filt[1] is None:
high = 'None'
else:
high = '%.2f' % (1000. * filt[1])
text = text + '\n' + 'Digital Filter: [' + low + ', ' + high + '] mHz'
fig.text(xpos,0.95,text,fontsize=14,va='top')
def plotKarr(dataObj,dataSet='active',fig=None,axis=None,maxSignals=None, sig_fontsize=24,
plot_title=True, cbar_ticks=None, cbar_shrink=1.0, cbar_fraction=0.15,
cbar_gstext_offset=-0.075, cbar_gstext_fontsize=None, **kwArgs):
"""Plot the horizontal wave number array for a pydarn.proc.music.musicArray object. The kArr must have aready
been calculated for the chosen data set using pydarn.proc.music.calculateKarr().
If the chosen data set has signals stored in the sigDetect attribute, numbers identifying each of the signals will
be plotted on the kArr plot.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to plot
fig : Optional[None or matplotlib.figure]
matplotlib figure object that will be plotted to. If not provided, one will be created.
axis : Optional[ ]
Matplotlib axis object to plot on.
maxSignals : Optional[None or int]
Maximum number of signals to plot if detected signals exist for the chosen data set.
sig_fontsize : Optional[float]
fontsize of signal markers
plot_title : Optional[bool]
If True, plot the title information
cbar_ticks : Optional[list]
Where to put the ticks on the color bar.
cbar_shrink : Optional[float]
fraction by which to shrink the colorbar
cbar_fraction : Optional[float]
fraction of original axes to use for colorbar
cbar_gstext_offset : Optional[float]
y-offset from colorbar of "Ground Scatter Only" text
cbar_gstext_fontsize : Optional[float]
fontsize of "Ground Scatter Only" text
**kwArgs
Keywords arguments
Written by Nathaniel A. Frissell, Fall 2013
"""
if fig is None and axis is None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=figsize)
currentData = getDataSet(dataObj,dataSet)
# Do plotting here!
if axis is None:
axis = fig.add_subplot(111,aspect='equal')
else:
fig = axis.get_figure()
plotKarrAxis(dataObj,dataSet=dataSet,axis=axis,maxSignals=maxSignals,
cbar_ticks=cbar_ticks, cbar_shrink=cbar_shrink, cbar_fraction=cbar_fraction,sig_fontsize=sig_fontsize,
cbar_gstext_offset=cbar_gstext_offset, cbar_gstext_fontsize=cbar_gstext_fontsize,**kwArgs)
if plot_title:
xpos = 0.130
fig.text(xpos,0.99,'Horizontal Wave Number',fontsize=20,va='top')
# Get the time limits.
timeLim = (np.min(currentData.time),np.max(currentData.time))
md = currentData.metadata
# Translate parameter information from short to long form.
paramDict = getParamDict(md['param'])
param = paramDict['param']
text = md['name'] + ' ' + param.capitalize() + timeLim[0].strftime(' (%Y %b %d %H:%M - ') + timeLim[1].strftime('%Y %b %d %H:%M)')
if md.has_key('fir_filter'):
filt = md['fir_filter']
if filt[0] is None:
low = 'None'
else:
low = '%.2f' % (1000. * filt[0])
if filt[1] is None:
high = 'None'
else:
high = '%.2f' % (1000. * filt[1])
text = text + '\n' + 'Digital Filter: [' + low + ', ' + high + '] mHz'
fig.text(xpos,0.95,text,fontsize=14,va='top')
def plotKarrDetected(dataObj,dataSet='active',fig=None,maxSignals=None,roiPlot=True):
"""Plot the horizontal wave number array for a pydarn.proc.music.musicArray object. The kArr must have aready
been calculated for the chosen data set using pydarn.proc.music.calculateKarr().
Unlike plotKarr, this routine can plot a region-of-interest map showing features detected by pydarn.proc.music.detectSignals().
If the chosen data set has signals stored in the sigDetect attribute, numbers identifying each of the signals will
be plotted on the kArr plot.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to plot
fig : Optional[None or matplotlib.figure]
matplotlib figure object that will be plotted to. If not provided, one will be created.
maxSignals : Optional[None or int]
Maximum number of signals to plot if detected signals exist for the chosen data set.
roiPlot : Optional[bool]
If true, a region of interest plot showing the features detected using pydarn.proc.music.detectSignals()
will be displayed alongside the kArr plot.
Written by Nathaniel A. Frissell, Fall 2013
"""
if fig is None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=figsize)
currentData = getDataSet(dataObj,dataSet)
from scipy import stats
import matplotlib.patheffects as PathEffects
# Do plotting here!
if roiPlot:
axis = fig.add_subplot(121,aspect='equal')
else:
axis = fig.add_subplot(111,aspect='equal')
# Page-wide header #############################################################
xpos = 0.130
fig.text(xpos,0.99,'Horizontal Wave Number',fontsize=20,va='top')
# Get the time limits.
timeLim = (np.min(currentData.time),np.max(currentData.time))
md = currentData.metadata
# Translate parameter information from short to long form.
paramDict = getParamDict(md['param'])
param = paramDict['param']
cbarLabel = paramDict['label']
text = md['name'] + ' ' + param.capitalize() + timeLim[0].strftime(' (%Y %b %d %H:%M - ') + timeLim[1].strftime('%Y %b %d %H:%M)')
if md.has_key('fir_filter'):
filt = md['fir_filter']
if filt[0] is None:
low = 'None'
else:
low = '%.2f' % (1000. * filt[0])
if filt[1] is None:
high = 'None'
else:
high = '%.2f' % (1000. * filt[1])
text = text + '\n' + 'Digital Filter: [' + low + ', ' + high + '] mHz'
fig.text(xpos,0.95,text,fontsize=14,va='top')
# End Page-wide header #########################################################
plotKarrAxis(dataObj,dataSet=dataSet,axis=axis,maxSignals=maxSignals)
if roiPlot:
################################################################################
# Feature detection...
data2 = currentData.sigDetect.labels
nrL, nrM = np.shape(data2)
scale = [0,data2.max()]
# Do plotting here!
axis = fig.add_subplot(122,aspect='equal')
verts = []
scan = []
# Plot Spectrum
for ll in range(nrL-1):
xx0 = currentData.kxVec[ll]
xx1 = currentData.kxVec[ll+1]
for mm in range(nrM-1):
scan.append(data2[ll,mm])
yy0 = currentData.kyVec[mm]
yy1 = currentData.kyVec[mm + 1]
x1,y1 = xx0, yy0
x2,y2 = xx1, yy0
x3,y3 = xx1, yy1
x4,y4 = xx0, yy1
verts.append(((x1,y1),(x2,y2),(x3,y3),(x4,y4),(x1,y1)))
cmap = matplotlib.cm.jet
bounds = np.linspace(scale[0],scale[1],256)
norm = matplotlib.colors.BoundaryNorm(bounds,cmap.N)
pcoll = PolyCollection(np.array(verts),edgecolors='face',linewidths=0,closed=False,cmap=cmap,norm=norm,zorder=99)
pcoll.set_array(np.array(scan))
axis.add_collection(pcoll,autolim=False)
axis.axvline(color='0.82',lw=2,zorder=150)
axis.axhline(color='0.82',lw=2,zorder=150)
# Colorbar
cbar = fig.colorbar(pcoll,orientation='vertical')#,shrink=.65,fraction=.1)
cbar.set_label('Region of Interest')
cbar.set_ticks([])
axis.set_xlim([np.min(currentData.kxVec),np.max(currentData.kxVec)])
axis.set_ylim([np.min(currentData.kyVec),np.max(currentData.kyVec)])
# Add wavelength to x/y tick labels ############################################
ticks = axis.get_xticks()
newLabels = []
for x in xrange(len(ticks)):
tck = ticks[x]
if tck != 0:
km = 2*np.pi/tck
km_txt = '%i' % km
else:
km_txt = ''
rad_txt = '%.2f' % tck
txt = '\n'.join([rad_txt,km_txt])
newLabels.append(txt)
axis.set_xticklabels(newLabels)
axis.set_xlabel(u'kx [rad]\n$\lambda$ [km]',ha='center')
ticks = axis.get_yticks()
newLabels = []
for y in xrange(len(ticks)):
tck = ticks[y]
if tck != 0:
km = 2*np.pi/tck
km_txt = '%i' % km
else:
km_txt = ''
rad_txt = '%.2f' % tck
txt = '\n'.join([rad_txt,km_txt])
newLabels.append(txt)
axis.set_yticklabels(newLabels)
axis.set_ylabel(u'ky [rad]\n$\lambda$ [km]',va='center')
# End add wavelength to x/y tick labels ########################################
if hasattr(currentData,'sigDetect'):
pe = [PathEffects.withStroke(linewidth=3,foreground='w')]
tmpList = range(currentData.sigDetect.nrSigs)[::-1] # Force list to plot backwards so number 1 is on top!
for signal in currentData.sigDetect.info:
if maxSignals is not None:
if signal['order'] > maxSignals: continue
xpos = currentData.kxVec[signal['maxpos'][0]]
ypos = currentData.kyVec[signal['maxpos'][1]]
txt = '%i' % signal['order']
axis.text(xpos,ypos,txt,color='k',zorder=200-signal['order'],size=24,path_effects=pe)
def plotKarrAxis(dataObj,dataSet='active',axis=None,maxSignals=None, sig_fontsize=24,x_labelpad=None,y_labelpad=None,
cbar_ticks=None, cbar_shrink=1.0, cbar_fraction=0.15,
cbar_gstext_offset=-0.075, cbar_gstext_fontsize=None,cbar_pad=0.05,cmap=None,plot_colorbar=True):
"""Plot the horizontal wave number array for a pydarn.proc.music.musicArray object. The kArr must have aready
been calculated for the chosen data set using pydarn.proc.music.calculateKarr().
If the chosen data set has signals stored in the sigDetect attribute, numbers identifying each of the signals will
be plotted on the kArr plot.
This routine will make the plot without titles, etc. It is used as the foundation for plotKarr() and plotKarrDetected().
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to plot
axis : Optional[matplotlib.figure.axis]
matplotlib axis object that will be plotted to. If not provided, this function will return.
maxSignals : Optional[None or int]
Maximum number of signals to plot if detected signals exist for the chosen data set.
sig_fontsize : Optional[float]
fontsize of signal markers
cbar_ticks : Optional[list]
Where to put the ticks on the color bar.
cbar_shrink : Optional[float]
fraction by which to shrink the colorbar
cbar_fraction : Optional[float]
fraction of original axes to use for colorbar
cbar_gstext_offset : Optional[float]
y-offset from colorbar of "Ground Scatter Only" text
cbar_gstext_fontsize : Optional[float]
fontsize of "Ground Scatter Only" text
cmap : Optional[None or matplotlib colormap object]
If None and cmap_handling=='matplotlib', use jet.
plot_colorbar : Optional[bool]
Enable or disable colorbar plotting.
Returns
-------
return_dict : dict
    Dictionary containing the k-array PolyCollection ('cbar_pcoll') and the colorbar label ('cbar_label').
Written by Nathaniel A. Frissell, Fall 2013
"""
if axis is None: return
return_dict = {}
fig = axis.get_figure()
from scipy import stats
import matplotlib.patheffects as PathEffects
currentData = getDataSet(dataObj,dataSet)
data = np.abs(currentData.karr) - np.min(np.abs(currentData.karr))
# Determine scale for colorbar.
sd = stats.nanstd(data,axis=None)
mean = stats.nanmean(data,axis=None)
scMax = mean + 6.5*sd
data = data / scMax
scale = [0.,1.]
nrL, nrM = np.shape(data)
verts = []
scan = []
# Plot Spectrum
for ll in range(nrL-1):
xx0 = currentData.kxVec[ll]
xx1 = currentData.kxVec[ll+1]
for mm in range(nrM-1):
scan.append(data[ll,mm])
yy0 = currentData.kyVec[mm]
yy1 = currentData.kyVec[mm + 1]
x1,y1 = xx0, yy0
x2,y2 = xx1, yy0
x3,y3 = xx1, yy1
x4,y4 = xx0, yy1
verts.append(((x1,y1),(x2,y2),(x3,y3),(x4,y4),(x1,y1)))
if cmap is None:
cmap = matplotlib.cm.jet
bounds = np.linspace(scale[0],scale[1],256)
norm = matplotlib.colors.BoundaryNorm(bounds,cmap.N)
pcoll = PolyCollection(np.array(verts),edgecolors='face',linewidths=0,closed=False,cmap=cmap,norm=norm,zorder=99)
pcoll.set_array(np.array(scan))
axis.add_collection(pcoll,autolim=False)
################################################################################
# Annotations
axis.axvline(color='0.82',lw=2,zorder=150)
axis.axhline(color='0.82',lw=2,zorder=150)
# Colorbar
cbar_label = 'Normalized Wavenumber Power'
if plot_colorbar:
cbar = fig.colorbar(pcoll,orientation='vertical',shrink=cbar_shrink,fraction=cbar_fraction,pad=cbar_pad)
cbar.set_label(cbar_label)
if not cbar_ticks:
cbar_ticks = np.arange(10)/10.
cbar.set_ticks(cbar_ticks)
if currentData.metadata.has_key('gscat'):
if currentData.metadata['gscat'] == 1:
cbar.ax.text(0.5,cbar_gstext_offset,'Ground\nscat\nonly',ha='center',fontsize=cbar_gstext_fontsize)
# cbar = fig.colorbar(pcoll,orientation='vertical')#,shrink=.65,fraction=.1)
# cbar.set_label('ABS(Spectral Density)')
# cbar.set_ticks(np.arange(10)/10.)
# if currentData.metadata.has_key('gscat'):
# if currentData.metadata['gscat'] == 1:
# cbar.ax.text(0.5,-0.075,'Ground\nscat\nonly',ha='center')
axis.set_xlim([np.min(currentData.kxVec),np.max(currentData.kxVec)])
axis.set_ylim([np.min(currentData.kyVec),np.max(currentData.kyVec)])
# Add wavelength to x/y tick labels ############################################
ticks = axis.get_xticks()
newLabels = []
for x in xrange(len(ticks)):
tck = ticks[x]
if tck != 0:
km = 2*np.pi/tck
km_txt = '%i' % km
else:
km_txt = ''
rad_txt = '%.2f' % tck
txt = '\n'.join([rad_txt,km_txt])
newLabels.append(txt)
axis.set_xticklabels(newLabels)
axis.set_xlabel(u'kx [rad]\n$\lambda$ [km]',ha='center',labelpad=x_labelpad)
# axis.set_xlabel('%f' % x_labelpad,ha='center',labelpad=x_labelpad)
ticks = axis.get_yticks()
newLabels = []
for y in xrange(len(ticks)):
tck = ticks[y]
if tck != 0:
km = 2*np.pi/tck
km_txt = '%i' % km
else:
km_txt = ''
rad_txt = '%.2f' % tck
txt = '\n'.join([km_txt,rad_txt])
newLabels.append(txt)
axis.set_yticklabels(newLabels,rotation=90.)
axis.set_ylabel(u'ky [rad]\n$\lambda$ [km]',va='center',labelpad=y_labelpad)
# End add wavelength to x/y tick labels ########################################
md = currentData.metadata
# Translate parameter information from short to long form.
paramDict = getParamDict(md['param'])
param = paramDict['param']
cbarLabel = paramDict['label']
if hasattr(currentData,'sigDetect'):
pe = [PathEffects.withStroke(linewidth=3,foreground='w')]
for signal in currentData.sigDetect.info:
if maxSignals is not None:
if signal['order'] > maxSignals: continue
xpos = currentData.kxVec[signal['maxpos'][0]]
ypos = currentData.kyVec[signal['maxpos'][1]]
txt = '%i' % signal['order']
axis.text(xpos,ypos,txt,color='k',zorder=200-signal['order'],size=sig_fontsize,path_effects=pe)
return_dict['cbar_pcoll'] = pcoll
return_dict['cbar_label'] = cbar_label
return return_dict
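# ------------------------------------------------------------------------------
# Hedged end-to-end sketch (not part of the original module): the k-array
# plotters above expect the MUSIC processing chain to have been run on the
# musicArray object first.  The function names below come from the docstrings
# in this file; the exact call signatures are assumptions.
#
#     import pydarn.proc.music as music
#     music.calculateFFT(dataObj)       # needed before plotFullSpectrum()
#     music.calculateDlm(dataObj)       # needed before plotDlm()
#     music.calculateKarr(dataObj)      # needed before plotKarr()/plotKarrAxis()
#     music.detectSignals(dataObj)      # optional; enables the signal markers
#
#     plotKarr(dataObj, maxSignals=5)
#     plotKarrDetected(dataObj, maxSignals=5)
# ------------------------------------------------------------------------------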
| gpl-3.0 |
sanketloke/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 142 | 7183 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, their most common document categories
and their ten most important words are printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
# Note: the following is identical to X[rows[:, np.newaxis], cols].sum() but
# much faster in scipy <= 0.16
weight = X[rows][:, cols].sum()
cut = (X[row_complement][:, cols].sum() +
X[rows][:, col_complement].sum())
return cut / weight
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
davidgbe/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 254 | 2639 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positve elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
khrapovs/mygmm | docs/source/conf.py | 1 | 9327 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# mygmm documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 21 16:17:43 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# -- Khrapov --------------------------------------------------------------
sys.path.insert(0, os.path.abspath('../..'))
import mock
MOCK_MODULES = ['numpy', 'seaborn', 'matplotlib', 'matplotlib.pylab',
'statsmodels', 'statsmodels.tsa', 'statsmodels.tsa.tsatools',
'scipy', 'scipy.stats', 'scipy.optimize', 'scipy.linalg',
'numdifftools', 'pandas']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
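# With these mocks registered, statements such as ``import numpy as np`` inside
# mygmm resolve to Mock objects at doc-build time, so autodoc can import the
# package on a builder (e.g. readthedocs) without the scientific stack installed.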
# on_rtd is whether we are on readthedocs.org,
# this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
numpydoc_class_members_toctree = False
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'numpydoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'mygmm'
copyright = '2014, Stanislav Khrapov'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mygmmdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'mygmm.tex', 'mygmm Documentation',
'Stanislav Khrapov', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mygmm', 'mygmm Documentation',
['Stanislav Khrapov'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'mygmm', 'mygmm Documentation',
'Stanislav Khrapov', 'mygmm', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
cms-btv-pog/rootpy | docs/sphinxext/ipython_directive.py | 31 | 27191 | # -*- coding: utf-8 -*-
"""Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives).
By default this directive assumes that your prompts are unchanged IPython ones,
but this can be customized. The configurable options that can be placed in
conf.py are
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython output prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import cStringIO
import os
import re
import sys
import tempfile
import ast
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import matplotlib
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
matplotlib.use('Agg')
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
inputline += '\n' + nextline[Nc:]
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self):
self.cout = cStringIO.StringIO()
# Create config object for IPython
config = Config()
config.Global.display_banner = False
config.Global.exec_lines = ['import numpy as np',
'from pylab import *'
]
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize ipython, but don't start its mainloop
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done *after* instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
#print "input='%s'"%self.input
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
source_raw = splitter.source_raw_reset()[1]
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""Process data block for INPUT token."""
decorator, input, rest = data
image_file = None
image_directive = None
#print 'INPUT:', data # dbg
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = decorator=='@doctest' or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i==0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, image_file,
image_directive)
#print 'OUTPUT', output # dbg
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, image_file):
"""Process data block for OUTPUT token."""
if is_doctest:
submitted = data.strip()
found = output
if found is not None:
found = found.strip()
# XXX - fperez: in 0.11, 'output' never comes with the prompt
# in it, just the actual output text. So I think all this code
# can be nuked...
# the above comment does not appear to be accurate... (minrk)
ind = found.find(output_prompt)
if ind<0:
e='output prompt="%s" does not match out line=%s' % \
(output_prompt, found)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
if found!=submitted:
e = ('doctest failure for input_lines="%s" with '
'found_output="%s" and submitted output="%s"' %
(input_lines, found, submitted) )
raise RuntimeError(e)
#print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin%lineno
output_prompt = self.promptout%lineno
image_file = None
image_directive = None
for token, data in block:
if token==COMMENT:
out_data = self.process_comment(data)
elif token==INPUT:
(out_data, input_lines, output, is_doctest, image_file,
image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token==OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
if self._pyplot_imported:
return
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
def process_pure_python(self, content):
"""
content is a list of strings. It is the unedited directive content.
This runs it line by line in the InteractiveShell, prepending
prompts as needed and capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
class IpythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
}
shell = EmbeddedSphinxShell()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
return savefig_dir, source_dir, rgxin, rgxout, promptin, promptout
def setup(self):
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
seen_docs = [i for i in os.listdir(tempfile.tempdir)
if i.startswith('seen_doc')]
if seen_docs:
fname = os.path.join(tempfile.tempdir, seen_docs[0])
docs = open(fname).read().split('\n')
if not self.state.document.current_source in docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
else: # haven't processed any docs yet
docs = []
# get config values
(savefig_dir, source_dir, rgxin,
rgxout, promptin, promptout) = self.get_config_options()
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
# write the filename to a tempfile because it's been "seen" now
if not self.state.document.current_source in docs:
fd, fname = tempfile.mkstemp(prefix="seen_doc", text=True)
fout = open(fname, 'a')
fout.write(self.state.document.current_source+'\n')
fout.close()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython','']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
#text = '\n'.join(lines)
#figs = '\n'.join(figures)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
#print lines
if len(lines)>2:
if debug:
print '\n'.join(lines)
else: #NOTE: this raises some errors, what's it for?
#print 'INSERTING %d lines'%len(lines)
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
text = '\n'.join(lines)
txtnode = nodes.literal_block(text, text)
txtnode['language'] = 'ipython'
#imgnode = nodes.image(figs)
# cleanup
self.teardown()
return []#, imgnode]
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IpythonDirective)
app.add_config_value('ipython_savefig_dir', None, True)
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_promptin', 'In [%d]:', True)
app.add_config_value('ipython_promptout', 'Out[%d]:', True)
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&b=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
ipython_directive('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print 'All OK? Check figures in _static/'
| gpl-3.0 |
jmchandonia/narrative | kbase-extension/jupyter_kernel_config.py | 4 | 15485 | # Configuration file for ipython-kernel.
c = get_config()
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: BaseIPythonApplication, Application,
# InteractiveShellApp, ConnectionFileMixin
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.IPKernelApp.exec_PYTHONSTARTUP = True
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.IPKernelApp.ip = u''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.IPKernelApp.pylab = None
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# The Kernel subclass to be used.
#
# This should allow easy re-use of the IPKernelApp entry point to configure and
# launch kernels other than IPython's own.
# c.IPKernelApp.kernel_class = <class 'IPython.kernel.zmq.ipkernel.IPythonKernel'>
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.IPKernelApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# set the control (ROUTER) port [default: random]
# c.IPKernelApp.control_port = 0
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# set the stdin (ROUTER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# lines of code to run at IPython startup.
c.InteractiveShellApp.exec_lines = [ 'import biokbase.narrative.magics',
'from biokbase.narrative.services import *',
'from biokbase.narrative.widgetmanager import WidgetManager',
'from biokbase.narrative.jobs import *' ]
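# Note: the startup lines above make the KBase service, widget and job helpers
# available in every Narrative code cell without an explicit import (assumed
# intent; the exact symbols come from the biokbase packages listed).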
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPKernelApp.extra_config_file = u''
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.kernel.zmq.iostream.OutStream'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
#
# c.IPKernelApp.transport = 'tcp'
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.IPKernelApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# A file to be run
# c.IPKernelApp.file_to_run = ''
# The IPython profile to use.
# c.IPKernelApp.profile = u'default'
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent_handle = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = u''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.IPKernelApp.gui = None
# Reraise exceptions encountered loading IPython extensions?
# c.IPKernelApp.reraise_ipython_extension_failures = False
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
#------------------------------------------------------------------------------
# IPythonKernel configuration
#------------------------------------------------------------------------------
# IPythonKernel will inherit config from: Kernel
# Whether to use appnope for compatibility with OS X App Nap.
#
# Only affects OS X >= 10.9.
# c.IPythonKernel._darwin_app_nap = True
#
# c.IPythonKernel._execute_sleep = 0.0005
#
# c.IPythonKernel._poll_interval = 0.05
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
#
# c.ZMQInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'LightBG'
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.ZMQInteractiveShell.display_page = False
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.separate_out2 = ''
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
# The part of the banner to be printed before the profile
# c.ZMQInteractiveShell.banner1 = 'Python 2.7.6 (default, Nov 18 2013, 15:12:51) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.2.0-dev -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# The part of the banner to be printed after the profile
# c.ZMQInteractiveShell.banner2 = ''
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.ZMQInteractiveShell.logstart = False
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.ZMQInteractiveShell.logappend = ''
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.quiet = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Username for the Session. Default is your system username.
# c.Session.username = u''
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The UUID identifying this session.
# c.Session.session = u''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# execution key, for signing messages.
# c.Session.key = ''
# Debug output in the Session
# c.Session.debug = False
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# path to file containing execution key.
# c.Session.keyfile = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
| mit |
ywcui1990/htmresearch | projects/sequence_prediction/mackey_glass/visualize_results.py | 13 | 1819 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv
import sys
from matplotlib import pyplot
import numpy
def run(filename, predictionDelay):
with open(filename, 'rU') as infile:
reader = csv.reader(infile)
reader.next()
actuals = []
shifted = []
shifted += [0] * predictionDelay
for row in reader:
actuals.append(row[1])
shifted.append(row[2])
shifted = shifted[predictionDelay:len(actuals)]
actuals = actuals[predictionDelay:]
errors = abs(numpy.array(shifted, dtype=float) - numpy.array(actuals, dtype=float)).tolist()
pyplot.subplot(2, 1, 1)
pyplot.plot(shifted)
pyplot.plot(actuals)
pyplot.subplot(2, 1, 2)
pyplot.plot(errors)
pyplot.show()
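# Example invocation (file name is hypothetical):
#   python visualize_results.py mackey_glass_results.csv 10
# This plots the actual series against the predictions shifted by the given
# delay in the top panel, and their absolute errors in the bottom panel.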
if __name__ == "__main__":
predictionDelay = int(sys.argv[2]) if len(sys.argv) > 2 else 1
run(sys.argv[1], predictionDelay)
| agpl-3.0 |
pdamodaran/yellowbrick | yellowbrick/classifier/rocauc.py | 1 | 20061 | # yellowbrick.classifier.rocauc
# Implements visual ROC/AUC curves for classification evaluation.
#
# Author: Rebecca Bilbro <[email protected]>
# Author: Benjamin Bengfort <[email protected]>
# Author: Neal Humphrey
# Created: Wed May 18 12:39:40 2016 -0400
#
# Copyright (C) 2017 District Data Labs
# For license information, see LICENSE.txt
#
# ID: rocauc.py [5388065] [email protected] $
"""
Implements visual ROC/AUC curves for classification evaluation.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
from ..exceptions import ModelError
from ..exceptions import YellowbrickValueError
from ..style.palettes import LINE_COLOR
from .base import ClassificationScoreVisualizer
from scipy import interp
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import train_test_split
from sklearn.metrics import auc, roc_curve
# Dictionary keys for ROCAUC
MACRO = "macro"
MICRO = "micro"
##########################################################################
## ROCAUC Visualizer
##########################################################################
class ROCAUC(ClassificationScoreVisualizer):
"""
Receiver Operating Characteristic (ROC) curves are a measure of a
classifier's predictive quality that compares and visualizes the tradeoff
between the model's sensitivity and specificity. The ROC curve displays
the true positive rate on the Y axis and the false positive rate on the
X axis on both a global average and per-class basis. The ideal point is
therefore the top-left corner of the plot: false positives are zero and
true positives are one.
This leads to another metric, area under the curve (AUC), a computation
of the relationship between false positives and true positives. The higher
the AUC, the better the model generally is. However, it is also important
to inspect the "steepness" of the curve, as this describes the
maximization of the true positive rate while minimizing the false positive
rate. Generalizing "steepness" usually leads to discussions about
convexity, which we do not get into here.
Parameters
----------
model : estimator
Must be a classifier, otherwise raises YellowbrickTypeError
ax : matplotlib Axes, default: None
The axes to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
classes : list
A list of class names for the legend. If classes is None and a y value
is passed to fit then the classes are selected from the target vector.
Note that the curves must be computed based on what is in the target
vector passed to the ``score()`` method. Class names are used for
labeling only and must be in the correct order to prevent confusion.
micro : bool, default = True
Plot the micro-averages ROC curve, computed from the sum of all true
positives and false positives across all classes. Micro is not defined
for binary classification problems with estimators with only a
decision_function method.
macro : bool, default = True
Plot the macro-averages ROC curve, which simply takes the average of
curves across all classes. Macro is not defined for binary
classification problems with estimators with only a decision_function
method.
per_class : bool, default = True
Plot the ROC curves for each individual class. This should be set
to false if only the macro or micro average curves are required. Per-
class classification is not defined for binary classification problems
with estimators with only a decision_function method.
kwargs : keyword arguments passed to the super class.
Currently passing in hard-coded colors for the Receiver Operating
Characteristic curve and the diagonal.
These will be refactored to a default Yellowbrick style.
Attributes
----------
score_ : float
Global accuracy score, unless micro or macro scores are requested
Notes
-----
ROC curves are typically used in binary classification, and in fact the
Scikit-Learn ``roc_curve`` metric is only able to perform metrics for
binary classifiers. As a result it is necessary to binarize the output or
to use one-vs-rest or one-vs-all strategies of classification. The
visualizer does its best to handle multiple situations, but exceptions can
arise from unexpected models or outputs.
Another important point is the relationship of class labels specified on
initialization to those drawn on the curves. The classes are not used to
constrain ordering or filter curves; the ROC computation happens on the
unique values specified in the target vector to the ``score`` method. To
ensure the best quality visualization, do not use a LabelEncoder for this
and do not pass in class labels.
.. seealso:: http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
.. todo:: Allow the class list to filter the curves on the visualization.
Examples
--------
>>> from yellowbrick.classifier import ROCAUC
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.model_selection import train_test_split
>>> data = load_data("occupancy")
>>> features = ["temp", "relative humidity", "light", "C02", "humidity"]
    >>> X = data[features].values
    >>> y = data.occupancy.values
    >>> X_train, X_test, y_train, y_test = train_test_split(X, y)
>>> oz = ROCAUC(LogisticRegression())
>>> oz.fit(X_train, y_train)
>>> oz.score(X_test, y_test)
>>> oz.poof()
"""
def __init__(self, model, ax=None, classes=None,
micro=True, macro=True, per_class=True, **kwargs):
super(ROCAUC, self).__init__(model, ax=ax, classes=classes, **kwargs)
# Set the visual parameters for ROCAUC
self.micro = micro
self.macro = macro
self.per_class = per_class
def score(self, X, y=None, **kwargs):
"""
Generates the predicted target values using the Scikit-Learn
estimator.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
Returns
-------
score_ : float
Global accuracy unless micro or macro scores are requested.
"""
# Compute the predictions for the test data
y_pred = self._get_y_scores(X)
# Note: In the above, _get_y_scores calls either a decision_function or
# predict_proba, which should return a 2D array. But in a binary
# classification using an estimator with only a decision_function, y_pred
# will instead be 1D, meaning only one curve can be plotted. In this case,
# we set the _binary_decision attribute to True to ensure only one curve is
# computed and plotted later on.
if y_pred.ndim == 1:
self._binary_decision = True
# Raise an error if it's a binary decision and user has set micro,
# macro, or per_class to True
if self.micro or self.macro or self.per_class:
raise ModelError(
"Micro, macro, and per-class scores are not defined for "
"binary classification for estimators with only "
"decision_function methods; set micro, macro, and "
"per-class params to False."
)
else:
self._binary_decision = False
# If it's not a binary decision, at least one of micro, macro, or
# per_class must be True
if not self.micro and not self.macro and not self.per_class:
raise YellowbrickValueError(
"no curves will be drawn; specify micro, macro, or per_class"
)
# Classes may be label encoded so only use what's in y to compute.
# The self.classes_ attribute will be used as names for labels.
classes = np.unique(y)
n_classes = len(classes)
# Store the false positive rate, true positive rate and curve info.
self.fpr = dict()
self.tpr = dict()
self.roc_auc = dict()
# If the decision is binary, compute the ROC curve and ROC area
if self._binary_decision == True:
self.fpr[0], self.tpr[0], _ = roc_curve(y, y_pred)
self.roc_auc[0] = auc(self.fpr[0], self.tpr[0])
else:
# Otherwise compute the ROC curve and ROC area for each class
for i, c in enumerate(classes):
self.fpr[i], self.tpr[i], _ = roc_curve(y, y_pred[:,i], pos_label=c)
self.roc_auc[i] = auc(self.fpr[i], self.tpr[i])
# Compute micro average
if self.micro:
self._score_micro_average(y, y_pred, classes, n_classes)
# Compute macro average
if self.macro:
self._score_macro_average(n_classes)
# Draw the Curves
self.draw()
        # Set score to micro average if specified
        if self.micro:
            self.score_ = self.roc_auc[MICRO]
        # Set score to macro average if not micro
        elif self.macro:
            self.score_ = self.roc_auc[MACRO]
        # Set score to the base score if neither macro nor micro
        else:
            self.score_ = self.estimator.score(X, y)
return self.score_
def draw(self):
"""
Renders ROC-AUC plot.
Called internally by score, possibly more than once
Returns
-------
ax : the axis with the plotted figure
"""
colors = self.colors[0:len(self.classes_)]
n_classes = len(colors)
# If it's a binary decision, plot the single ROC curve
if self._binary_decision == True:
self.ax.plot(
self.fpr[0], self.tpr[0],
label='ROC for binary decision, AUC = {:0.2f}'.format(
self.roc_auc[0]
)
)
# If per-class plotting is requested, plot ROC curves for each class
if self.per_class:
for i, color in zip(range(n_classes), colors):
self.ax.plot(
self.fpr[i], self.tpr[i], color=color,
label='ROC of class {}, AUC = {:0.2f}'.format(
self.classes_[i], self.roc_auc[i],
)
)
# If requested, plot the ROC curve for the micro average
if self.micro:
self.ax.plot(
self.fpr[MICRO], self.tpr[MICRO], linestyle="--",
                color=self.colors[len(self.classes_)-1],
label='micro-average ROC curve, AUC = {:0.2f}'.format(
self.roc_auc["micro"],
)
)
# If requested, plot the ROC curve for the macro average
if self.macro:
self.ax.plot(
self.fpr[MACRO], self.tpr[MACRO], linestyle="--",
                color=self.colors[len(self.classes_)-1],
label='macro-average ROC curve, AUC = {:0.2f}'.format(
self.roc_auc["macro"],
)
)
# Plot the line of no discrimination to compare the curve to.
self.ax.plot([0,1], [0,1], linestyle=':', c=LINE_COLOR)
return self.ax
def finalize(self, **kwargs):
"""
Finalize executes any subclass-specific axes finalization steps.
The user calls poof and poof calls finalize.
Parameters
----------
kwargs: generic keyword arguments.
"""
# Set the title and add the legend
self.set_title('ROC Curves for {}'.format(self.name))
self.ax.legend(loc='lower right', frameon=True)
# Set the limits for the ROC/AUC (always between 0 and 1)
self.ax.set_xlim([0.0, 1.0])
self.ax.set_ylim([0.0, 1.0])
# Set x and y axis labels
        self.ax.set_ylabel('True Positive Rate')
self.ax.set_xlabel('False Positive Rate')
def _get_y_scores(self, X):
"""
The ``roc_curve`` metric requires target scores that can either be the
probability estimates of the positive class, confidence values or non-
thresholded measure of decisions (as returned by "decision_function").
This method computes the scores by resolving the estimator methods
        that retrieve these values.
.. todo:: implement confidence values metric.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features -- generally the test data
that is associated with y_true values.
"""
# The resolution order of scoring functions
attrs = (
'predict_proba',
'decision_function',
)
# Return the first resolved function
for attr in attrs:
try:
method = getattr(self.estimator, attr, None)
if method:
return method(X)
except AttributeError:
# Some Scikit-Learn estimators have both probability and
# decision functions but override __getattr__ and raise an
# AttributeError on access.
# Note that because of the ordering of our attrs above,
# estimators with both will *only* ever use probability.
continue
# If we've gotten this far, raise an error
raise ModelError(
"ROCAUC requires estimators with predict_proba or "
"decision_function methods."
)
def _score_micro_average(self, y, y_pred, classes, n_classes):
"""
Compute the micro average scores for the ROCAUC curves.
"""
# Convert y to binarized array for micro and macro scores
y = label_binarize(y, classes=classes)
if n_classes == 2:
y = np.hstack((1-y, y))
# Compute micro-average
self.fpr[MICRO], self.tpr[MICRO], _ = roc_curve(y.ravel(), y_pred.ravel())
self.roc_auc[MICRO] = auc(self.fpr[MICRO], self.tpr[MICRO])
def _score_macro_average(self, n_classes):
"""
Compute the macro average scores for the ROCAUC curves.
"""
# Gather all FPRs
all_fpr = np.unique(np.concatenate([self.fpr[i] for i in range(n_classes)]))
avg_tpr = np.zeros_like(all_fpr)
# Compute the averages per class
for i in range(n_classes):
avg_tpr += interp(all_fpr, self.fpr[i], self.tpr[i])
# Finalize the average
avg_tpr /= n_classes
# Store the macro averages
self.fpr[MACRO] = all_fpr
self.tpr[MACRO] = avg_tpr
self.roc_auc[MACRO] = auc(self.fpr[MACRO], self.tpr[MACRO])
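# Editorial sketch (not part of the original module): for a binary estimator
# that only exposes decision_function (e.g. sklearn.svm.LinearSVC), the
# averaged and per-class curves are undefined (see the class docstring), so the
# corresponding flags must be disabled explicitly or score() raises ModelError.
# X_train/y_train/X_test/y_test are assumed to exist:
#
#   from sklearn.svm import LinearSVC
#   oz = ROCAUC(LinearSVC(), micro=False, macro=False, per_class=False)
#   oz.fit(X_train, y_train)
#   oz.score(X_test, y_test)
#   oz.poof()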
##########################################################################
## Quick method for ROCAUC
##########################################################################
def roc_auc(model, X, y=None, ax=None, **kwargs):
"""ROCAUC Quick method:
Receiver Operating Characteristic (ROC) curves are a measure of a
classifier's predictive quality that compares and visualizes the tradeoff
between the models' sensitivity and specificity. The ROC curve displays
the true positive rate on the Y axis and the false positive rate on the
X axis on both a global average and per-class basis. The ideal point is
therefore the top-left corner of the plot: false positives are zero and
true positives are one.
This leads to another metric, area under the curve (AUC), a computation
of the relationship between false positives and true positives. The higher
the AUC, the better the model generally is. However, it is also important
to inspect the "steepness" of the curve, as this describes the
maximization of the true positive rate while minimizing the false positive
rate. Generalizing "steepness" usually leads to discussions about
convexity, which we do not get into here.
Parameters
----------
model : the Scikit-Learn estimator
Should be an instance of a classifier, else the __init__ will
return an error.
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
ax : the axis to plot the figure on.
classes : list
A list of class names for the legend. If classes is None and a y value
is passed to fit then the classes are selected from the target vector.
Note that the curves must be computed based on what is in the target
vector passed to the ``score()`` method. Class names are used for
labeling only and must be in the correct order to prevent confusion.
micro : bool, default = True
Plot the micro-averages ROC curve, computed from the sum of all true
positives and false positives across all classes. Micro is not defined
for binary classification problems with estimators with only a
decision_function method.
macro : bool, default = True
Plot the macro-averages ROC curve, which simply takes the average of
curves across all classes. Macro is not defined for binary
classification problems with estimators with only a decision_function
method.
per_class : bool, default = True
Plot the ROC curves for each individual class. This should be set
to false if only the macro or micro average curves are required. Per-
class classification is not defined for binary classification problems
with estimators with only a decision_function method.
Notes
-----
ROC curves are typically used in binary classification, and in fact the
Scikit-Learn ``roc_curve`` metric is only able to perform metrics for
binary classifiers. As a result it is necessary to binarize the output or
to use one-vs-rest or one-vs-all strategies of classification. The
visualizer does its best to handle multiple situations, but exceptions can
arise from unexpected models or outputs.
Another important point is the relationship of class labels specified on
initialization to those drawn on the curves. The classes are not used to
constrain ordering or filter curves; the ROC computation happens on the
unique values specified in the target vector to the ``score`` method. To
ensure the best quality visualization, do not use a LabelEncoder for this
and do not pass in class labels.
.. seealso:: http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
.. todo:: Allow the class list to filter the curves on the visualization.
Examples
--------
>>> from yellowbrick.classifier import ROCAUC
>>> from sklearn.linear_model import LogisticRegression
>>> data = load_data("occupancy")
>>> features = ["temp", "relative humidity", "light", "C02", "humidity"]
>>> X = data[features].values
>>> y = data.occupancy.values
>>> roc_auc(LogisticRegression(), X, y)
Returns
-------
ax : matplotlib axes
Returns the axes that the roc-auc curve was drawn on.
"""
# Instantiate the visualizer
visualizer = ROCAUC(model, ax, **kwargs)
# Create the train and test splits
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Fit and transform the visualizer (calls draw)
visualizer.fit(X_train, y_train, **kwargs)
visualizer.score(X_test, y_test)
visualizer.finalize()
# Return the axes object on the visualizer
return visualizer.ax
| apache-2.0 |
markomanninen/strongs | isopsephy/search.py | 5 | 1537 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
# file: search.py
def find_cumulative_indices(list_of_numbers, search_sum):
"""
find_cumulative_indices([70, 58, 81, 909, 70, 215, 70, 1022, 580, 930, 898], 285) ->
[[4, 5],[5, 6]]
"""
u = 0
y = 0
result = []
for idx, val in enumerate(list_of_numbers):
y += list_of_numbers[idx]
while y >= search_sum:
if y == search_sum:
result.append(range(u, idx+1))
y -= list_of_numbers[u]
u += 1
# if matches are not found, empty string is returned
# for easier cell data handling on pandas dataframe
return result or ''
# http://stackoverflow.com/questions/21380268/matching-the-sum-of-values-on-string
def search_by_num(text, num):
return list2string(find_number(string2list(text), num))
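# Illustrative call (editorial), reusing the numbers from the docstring above:
#   search_by_num("70 58 81 909 70 215 70 1022 580 930 898", 285)
# finds the windows [70, 215] and [215, 70] and joins them into one string,
# returning '70 215 215 70'.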
def list2string(alist):
return " ".join(map(str, alist))
def string2list(slist):
return list(map(int, slist.split()))
def find_number(alist, total):
u = 0
y = 0 # the current sum of the interval (u .. v)
res = []
for v in range(0, len(alist)):
# at this point the interval sum y is smaller than the requested total,
# so we move the right end of the interval forward
y += alist[v]
while y >= total:
if y == total:
res.append(list2string(alist[ u : v+1 ]))
# if the current sum is too large, move the left end of the interval forward
y -= alist[u]
u += 1
return res | mit |
mrochan/gfrnet | caffe-gfrnet/examples/web_demo/app.py | 41 | 7793 | import os
import time
import cPickle
import datetime
import logging
import flask
import werkzeug
import optparse
import tornado.wsgi
import tornado.httpserver
import numpy as np
import pandas as pd
from PIL import Image
import cStringIO as StringIO
import urllib
import exifutil
import caffe
REPO_DIRNAME = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + '/../..')
UPLOAD_FOLDER = '/tmp/caffe_demos_uploads'
ALLOWED_IMAGE_EXTENSIONS = set(['png', 'bmp', 'jpg', 'jpe', 'jpeg', 'gif'])
# Obtain the flask app object
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('index.html', has_result=False)
@app.route('/classify_url', methods=['GET'])
def classify_url():
imageurl = flask.request.args.get('imageurl', '')
try:
string_buffer = StringIO.StringIO(
urllib.urlopen(imageurl).read())
image = caffe.io.load_image(string_buffer)
except Exception as err:
# For any exception we encounter in reading the image, we will just
# not continue.
logging.info('URL Image open error: %s', err)
return flask.render_template(
'index.html', has_result=True,
result=(False, 'Cannot open image from URL.')
)
logging.info('Image: %s', imageurl)
result = app.clf.classify_image(image)
return flask.render_template(
'index.html', has_result=True, result=result, imagesrc=imageurl)
@app.route('/classify_upload', methods=['POST'])
def classify_upload():
try:
# We will save the file to disk for possible data collection.
imagefile = flask.request.files['imagefile']
filename_ = str(datetime.datetime.now()).replace(' ', '_') + \
werkzeug.secure_filename(imagefile.filename)
filename = os.path.join(UPLOAD_FOLDER, filename_)
imagefile.save(filename)
logging.info('Saving to %s.', filename)
image = exifutil.open_oriented_im(filename)
except Exception as err:
logging.info('Uploaded image open error: %s', err)
return flask.render_template(
'index.html', has_result=True,
result=(False, 'Cannot open uploaded image.')
)
result = app.clf.classify_image(image)
return flask.render_template(
'index.html', has_result=True, result=result,
imagesrc=embed_image_html(image)
)
def embed_image_html(image):
"""Creates an image embedded in HTML base64 format."""
image_pil = Image.fromarray((255 * image).astype('uint8'))
image_pil = image_pil.resize((256, 256))
string_buf = StringIO.StringIO()
image_pil.save(string_buf, format='png')
data = string_buf.getvalue().encode('base64').replace('\n', '')
return 'data:image/png;base64,' + data
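# Editorial note: the returned string is a data URI, so the template can embed
# it directly in an <img> tag, e.g. (illustrative):
#   <img src="data:image/png;base64,...">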
def allowed_file(filename):
return (
'.' in filename and
filename.rsplit('.', 1)[1] in ALLOWED_IMAGE_EXTENSIONS
)
class ImagenetClassifier(object):
default_args = {
'model_def_file': (
'{}/models/bvlc_reference_caffenet/deploy.prototxt'.format(REPO_DIRNAME)),
'pretrained_model_file': (
'{}/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'.format(REPO_DIRNAME)),
'mean_file': (
'{}/python/caffe/imagenet/ilsvrc_2012_mean.npy'.format(REPO_DIRNAME)),
'class_labels_file': (
'{}/data/ilsvrc12/synset_words.txt'.format(REPO_DIRNAME)),
'bet_file': (
'{}/data/ilsvrc12/imagenet.bet.pickle'.format(REPO_DIRNAME)),
}
for key, val in default_args.iteritems():
if not os.path.exists(val):
raise Exception(
"File for {} is missing. Should be at: {}".format(key, val))
default_args['image_dim'] = 256
default_args['raw_scale'] = 255.
def __init__(self, model_def_file, pretrained_model_file, mean_file,
raw_scale, class_labels_file, bet_file, image_dim, gpu_mode):
logging.info('Loading net and associated files...')
if gpu_mode:
caffe.set_mode_gpu()
else:
caffe.set_mode_cpu()
self.net = caffe.Classifier(
model_def_file, pretrained_model_file,
image_dims=(image_dim, image_dim), raw_scale=raw_scale,
mean=np.load(mean_file).mean(1).mean(1), channel_swap=(2, 1, 0)
)
with open(class_labels_file) as f:
labels_df = pd.DataFrame([
{
'synset_id': l.strip().split(' ')[0],
'name': ' '.join(l.strip().split(' ')[1:]).split(',')[0]
}
for l in f.readlines()
])
self.labels = labels_df.sort('synset_id')['name'].values
self.bet = cPickle.load(open(bet_file))
# A bias to prefer children nodes in single-chain paths
# I am setting the value to 0.1 as a quick, simple model.
# We could use better psychological models here...
self.bet['infogain'] -= np.array(self.bet['preferences']) * 0.1
def classify_image(self, image):
try:
starttime = time.time()
scores = self.net.predict([image], oversample=True).flatten()
endtime = time.time()
indices = (-scores).argsort()[:5]
predictions = self.labels[indices]
# In addition to the prediction text, we will also produce
# the length for the progress bar visualization.
meta = [
(p, '%.5f' % scores[i])
for i, p in zip(indices, predictions)
]
logging.info('result: %s', str(meta))
# Compute expected information gain
expected_infogain = np.dot(
self.bet['probmat'], scores[self.bet['idmapping']])
expected_infogain *= self.bet['infogain']
# sort the scores
infogain_sort = expected_infogain.argsort()[::-1]
bet_result = [(self.bet['words'][v], '%.5f' % expected_infogain[v])
for v in infogain_sort[:5]]
logging.info('bet result: %s', str(bet_result))
return (True, meta, bet_result, '%.3f' % (endtime - starttime))
except Exception as err:
logging.info('Classification error: %s', err)
return (False, 'Something went wrong when classifying the '
'image. Maybe try another one?')
def start_tornado(app, port=5000):
http_server = tornado.httpserver.HTTPServer(
tornado.wsgi.WSGIContainer(app))
http_server.listen(port)
print("Tornado server starting on port {}".format(port))
tornado.ioloop.IOLoop.instance().start()
def start_from_terminal(app):
"""
Parse command line options and start the server.
"""
parser = optparse.OptionParser()
parser.add_option(
'-d', '--debug',
help="enable debug mode",
action="store_true", default=False)
parser.add_option(
'-p', '--port',
help="which port to serve content on",
type='int', default=5000)
parser.add_option(
'-g', '--gpu',
help="use gpu mode",
action='store_true', default=False)
opts, args = parser.parse_args()
ImagenetClassifier.default_args.update({'gpu_mode': opts.gpu})
# Initialize classifier + warm start by forward for allocation
app.clf = ImagenetClassifier(**ImagenetClassifier.default_args)
app.clf.net.forward()
if opts.debug:
app.run(debug=True, host='0.0.0.0', port=opts.port)
else:
start_tornado(app, opts.port)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
if not os.path.exists(UPLOAD_FOLDER):
os.makedirs(UPLOAD_FOLDER)
start_from_terminal(app)
| mit |
riddlezyc/geolab | src/structure/cluster.py | 1 | 2173 | """
use the .cluster file generated by a LAMMPS rerun to get the average number of
molecules per cluster, the number of clusters, and how they change with time
"""
import matplotlib.pyplot as plt
from calc import ave_accum
dirName = r"F:\simulations\asphaltenes\production\longtime\athInHeptane\nvt\rerun\4p0/"
fileName = 'ath.cluster'
# label = 'ATH in Toluene'
label = 'ATH in Heptane'
with open(dirName + fileName, 'r') as foo:
dataFile = foo.read().split('ITEM: TIMESTEP\n')
print len(dataFile)
cluster = []
clusterNo = []
# first element is empty
for iframe, frame in enumerate(dataFile[1:]):
if iframe % 100 == 0:
print 'Frame No.: %d' % iframe
# remove the header and the last element(empty)
lines = frame.split('\n')[8:-1]
clusterByFrame = []
for line in lines:
id, mol, type, clusterid = line.split()
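        # each atom line is assumed to hold: atom id, molecule id, atom type, cluster id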
if [int(mol), int(clusterid)] not in clusterByFrame:
clusterByFrame.append([int(mol), int(clusterid)])
# print clusterByFrame
# print len(clusterByFrame)
xcluster = [x[1] for x in clusterByFrame]
unionCluster = set(xcluster) # remove repeated cluster id
# print xcluster
# print len(unionCluster)
count = []
for i in unionCluster:
cnttmp = xcluster.count(i)
count.append(cnttmp)
# print count
avecount = float(sum(count)) / len(count)
# print avecount
cluster.append(avecount)
clusterNo.append(len(unionCluster))
cumulave = ave_accum(cluster)
with open(dirName + 'cluster.dat', 'w') as foo:
print >> foo, '#frame clustersize ave_accum No_of_clusters'
for i, t in enumerate(cluster):
print >> foo, '%5d%10.4f%10.4f%5d' % (i, t, cumulave[i], clusterNo[i])
plt.figure(0, figsize=(8, 4))
figName = dirName + 'average cluster size.png'
plt.title('Average Cluster Size', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('Number of molecules', fontsize=12)
plt.plot(cluster, label='%s' % label)
plt.plot(cumulave, label='average', linewidth=3)
plt.legend(loc='best', ncol=4, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
plt.show()
plt.close(0)
| gpl-3.0 |
GauravBh1010tt/DeepLearn | fake news challenge (FNC-1)/util.py | 1 | 10812 | # -*- coding: utf-8 -*-
from csv import DictReader
from csv import DictWriter
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
# Initialise global variables
label_ref = {'agree': 0, 'disagree': 1, 'discuss': 2, 'unrelated': 3}
label_ref_rev = {0: 'agree', 1: 'disagree', 2: 'discuss', 3: 'unrelated'}
stop_words = [
"a", "about", "above", "across", "after", "afterwards", "again", "against", "all", "almost", "alone", "along",
"already", "also", "although", "always", "am", "among", "amongst", "amoungst", "amount", "an", "and", "another",
"any", "anyhow", "anyone", "anything", "anyway", "anywhere", "are", "around", "as", "at", "back", "be",
"became", "because", "become", "becomes", "becoming", "been", "before", "beforehand", "behind", "being",
"below", "beside", "besides", "between", "beyond", "bill", "both", "bottom", "but", "by", "call", "can", "co",
"con", "could", "cry", "de", "describe", "detail", "do", "done", "down", "due", "during", "each", "eg", "eight",
"either", "eleven", "else", "elsewhere", "empty", "enough", "etc", "even", "ever", "every", "everyone",
"everything", "everywhere", "except", "few", "fifteen", "fifty", "fill", "find", "fire", "first", "five", "for",
"former", "formerly", "forty", "found", "four", "from", "front", "full", "further", "get", "give", "go", "had",
"has", "have", "he", "hence", "her", "here", "hereafter", "hereby", "herein", "hereupon", "hers", "herself",
"him", "himself", "his", "how", "however", "hundred", "i", "ie", "if", "in", "inc", "indeed", "interest",
"into", "is", "it", "its", "itself", "keep", "last", "latter", "latterly", "least", "less", "ltd", "made",
"many", "may", "me", "meanwhile", "might", "mill", "mine", "more", "moreover", "most", "mostly", "move", "much",
"must", "my", "myself", "name", "namely", "neither", "nevertheless", "next", "nine", "nobody", "now", "nowhere",
"of", "off", "often", "on", "once", "one", "only", "onto", "or", "other", "others", "otherwise", "our", "ours",
"ourselves", "out", "over", "own", "part", "per", "perhaps", "please", "put", "rather", "re", "same", "see",
"serious", "several", "she", "should", "show", "side", "since", "sincere", "six", "sixty", "so", "some",
"somehow", "someone", "something", "sometime", "sometimes", "somewhere", "still", "such", "system", "take",
"ten", "than", "that", "the", "their", "them", "themselves", "then", "thence", "there", "thereafter", "thereby",
"therefore", "therein", "thereupon", "these", "they", "thick", "thin", "third", "this", "those", "though",
"three", "through", "throughout", "thru", "thus", "to", "together", "too", "top", "toward", "towards", "twelve",
"twenty", "two", "un", "under", "until", "up", "upon", "us", "very", "via", "was", "we", "well", "were", "what",
"whatever", "when", "whence", "whenever", "where", "whereafter", "whereas", "whereby", "wherein", "whereupon",
"wherever", "whether", "which", "while", "whither", "who", "whoever", "whole", "whom", "whose", "why", "will",
"with", "within", "without", "would", "yet", "you", "your", "yours", "yourself", "yourselves"
]
# Define data class
class FNCData:
"""
Define class for Fake News Challenge data
"""
def __init__(self, file_instances, file_bodies):
# Load data
self.instances = self.read(file_instances)
bodies = self.read(file_bodies)
self.heads = {}
self.bodies = {}
# Process instances
for instance in self.instances:
if instance['Headline'] not in self.heads:
head_id = len(self.heads)
self.heads[instance['Headline']] = head_id
instance['Body ID'] = int(instance['Body ID'])
# Process bodies
for body in bodies:
self.bodies[int(body['Body ID'])] = body['articleBody']
def read(self, filename):
"""
Read Fake News Challenge data from CSV file
Args:
filename: str, filename + extension
Returns:
rows: list, of dict per instance
"""
# Initialise
rows = []
# Process file
with open(filename, "r") as table:
r = DictReader(table)
for line in r:
rows.append(line)
return rows
# Define relevant functions
def pipeline_train(train, test, lim_unigram):
"""
Process train set, create relevant vectorizers
Args:
train: FNCData object, train set
test: FNCData object, test set
lim_unigram: int, number of most frequent words to consider
Returns:
train_set: list, of numpy arrays
train_stances: list, of ints
bow_vectorizer: sklearn CountVectorizer
tfreq_vectorizer: sklearn TfidfTransformer(use_idf=False)
tfidf_vectorizer: sklearn TfidfVectorizer()
"""
# Initialise
heads = []
heads_track = {}
bodies = []
bodies_track = {}
body_ids = []
id_ref = {}
train_set = []
train_stances = []
cos_track = {}
test_heads = []
test_heads_track = {}
test_bodies = []
test_bodies_track = {}
test_body_ids = []
head_tfidf_track = {}
body_tfidf_track = {}
# Identify unique heads and bodies
for instance in train.instances:
head = instance['Headline']
body_id = instance['Body ID']
if head not in heads_track:
heads.append(head)
heads_track[head] = 1
if body_id not in bodies_track:
bodies.append(train.bodies[body_id])
bodies_track[body_id] = 1
body_ids.append(body_id)
for instance in test.instances:
head = instance['Headline']
body_id = instance['Body ID']
if head not in test_heads_track:
test_heads.append(head)
test_heads_track[head] = 1
if body_id not in test_bodies_track:
test_bodies.append(test.bodies[body_id])
test_bodies_track[body_id] = 1
test_body_ids.append(body_id)
# Create reference dictionary
for i, elem in enumerate(heads + body_ids):
id_ref[elem] = i
# Create vectorizers and BOW and TF arrays for train set
bow_vectorizer = CountVectorizer(max_features=lim_unigram, stop_words=stop_words)
bow = bow_vectorizer.fit_transform(heads + bodies) # Train set only
tfreq_vectorizer = TfidfTransformer(use_idf=False).fit(bow)
tfreq = tfreq_vectorizer.transform(bow).toarray() # Train set only
tfidf_vectorizer = TfidfVectorizer(max_features=lim_unigram, stop_words=stop_words).\
fit(heads + bodies + test_heads + test_bodies) # Train and test sets
# Process train set
for instance in train.instances:
head = instance['Headline']
body_id = instance['Body ID']
head_tf = tfreq[id_ref[head]].reshape(1, -1)
body_tf = tfreq[id_ref[body_id]].reshape(1, -1)
if head not in head_tfidf_track:
head_tfidf = tfidf_vectorizer.transform([head]).toarray()
head_tfidf_track[head] = head_tfidf
else:
head_tfidf = head_tfidf_track[head]
if body_id not in body_tfidf_track:
body_tfidf = tfidf_vectorizer.transform([train.bodies[body_id]]).toarray()
body_tfidf_track[body_id] = body_tfidf
else:
body_tfidf = body_tfidf_track[body_id]
if (head, body_id) not in cos_track:
tfidf_cos = cosine_similarity(head_tfidf, body_tfidf)[0].reshape(1, 1)
cos_track[(head, body_id)] = tfidf_cos
else:
tfidf_cos = cos_track[(head, body_id)]
feat_vec = np.squeeze(np.c_[head_tf, body_tf, tfidf_cos])
train_set.append(feat_vec)
train_stances.append(label_ref[instance['Stance']])
return train_set, train_stances, bow_vectorizer, tfreq_vectorizer, tfidf_vectorizer
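# Editorial sketch of how the two pipeline functions are meant to be chained
# (the file names and lim_unigram value are assumptions, not part of this module):
#
#   train = FNCData("train_stances.csv", "train_bodies.csv")
#   test = FNCData("test_stances.csv", "test_bodies.csv")
#   train_set, train_stances, bow_v, tfreq_v, tfidf_v = \
#       pipeline_train(train, test, lim_unigram=5000)
#   test_set = pipeline_test(test, bow_v, tfreq_v, tfidf_v)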
def pipeline_test(test, bow_vectorizer, tfreq_vectorizer, tfidf_vectorizer):
"""
Process test set
Args:
test: FNCData object, test set
bow_vectorizer: sklearn CountVectorizer
tfreq_vectorizer: sklearn TfidfTransformer(use_idf=False)
tfidf_vectorizer: sklearn TfidfVectorizer()
Returns:
test_set: list, of numpy arrays
"""
# Initialise
test_set = []
heads_track = {}
bodies_track = {}
cos_track = {}
# Process test set
for instance in test.instances:
head = instance['Headline']
body_id = instance['Body ID']
if head not in heads_track:
head_bow = bow_vectorizer.transform([head]).toarray()
head_tf = tfreq_vectorizer.transform(head_bow).toarray()[0].reshape(1, -1)
head_tfidf = tfidf_vectorizer.transform([head]).toarray().reshape(1, -1)
heads_track[head] = (head_tf, head_tfidf)
else:
head_tf = heads_track[head][0]
head_tfidf = heads_track[head][1]
if body_id not in bodies_track:
body_bow = bow_vectorizer.transform([test.bodies[body_id]]).toarray()
body_tf = tfreq_vectorizer.transform(body_bow).toarray()[0].reshape(1, -1)
body_tfidf = tfidf_vectorizer.transform([test.bodies[body_id]]).toarray().reshape(1, -1)
bodies_track[body_id] = (body_tf, body_tfidf)
else:
body_tf = bodies_track[body_id][0]
body_tfidf = bodies_track[body_id][1]
if (head, body_id) not in cos_track:
tfidf_cos = cosine_similarity(head_tfidf, body_tfidf)[0].reshape(1, 1)
cos_track[(head, body_id)] = tfidf_cos
else:
tfidf_cos = cos_track[(head, body_id)]
feat_vec = np.squeeze(np.c_[head_tf, body_tf, tfidf_cos])
test_set.append(feat_vec)
return test_set
def save_predictions(pred, file):
"""
Save predictions to CSV file
Args:
pred: numpy array, of numeric predictions
file: str, filename + extension
"""
with open(file, 'w') as csvfile:
fieldnames = ['Stance']
writer = DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for instance in pred:
writer.writerow({'Stance': label_ref_rev[instance]}) | mit |
potash/scikit-learn | examples/gaussian_process/plot_gpr_noisy.py | 104 | 3778 | """
=============================================================
Gaussian process regression (GPR) with noise-level estimation
=============================================================
This example illustrates that GPR with a sum-kernel including a WhiteKernel can
estimate the noise level of data. An illustration of the
log-marginal-likelihood (LML) landscape shows that there exist two local
maxima of LML. The first corresponds to a model with a high noise level and a
large length scale, which explains all variations in the data by noise. The
second one has a smaller noise level and shorter length scale, which explains
most of the variation by the noise-free functional relationship. The second
model has a higher likelihood; however, depending on the initial value for the
hyperparameters, the gradient-based optimization might also converge to the
high-noise solution. It is thus important to repeat the optimization several
times for different initializations.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 20)[:, np.newaxis]
y = 0.5 * np.sin(3 * X[:, 0]) + rng.normal(0, 0.5, X.shape[0])
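# Editorial note: the targets above carry Gaussian noise with std 0.5, so a
# well-fitted WhiteKernel (whose noise_level parameter is a variance) should
# land near 0.5 ** 2 = 0.25 at the better of the two local optima discussed in
# the module docstring.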
# First run
plt.figure(0)
kernel = 1.0 * RBF(length_scale=100.0, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=1, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
y_mean + np.sqrt(np.diag(y_cov)),
alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
% (kernel, gp.kernel_,
gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
# Second run
plt.figure(1)
kernel = 1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
y_mean + np.sqrt(np.diag(y_cov)),
alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
% (kernel, gp.kernel_,
gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
# Plot LML landscape
plt.figure(2)
theta0 = np.logspace(-2, 3, 49)
theta1 = np.logspace(-2, 0, 50)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
LML = [[gp.log_marginal_likelihood(np.log([0.36, Theta0[i, j], Theta1[i, j]]))
for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T
vmin, vmax = (-LML).min(), (-LML).max()
vmax = 50
plt.contour(Theta0, Theta1, -LML,
levels=np.logspace(np.log10(vmin), np.log10(vmax), 50),
norm=LogNorm(vmin=vmin, vmax=vmax))
plt.colorbar()
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Length-scale")
plt.ylabel("Noise-level")
plt.title("Log-marginal-likelihood")
plt.tight_layout()
plt.show()
| bsd-3-clause |
Sentient07/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 108 | 2026 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
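# Editorial note: inverse_transform maps the 15-component samples back into the
# original 64-dimensional pixel space so they can be rendered as 8x8 digit
# images below.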
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tools/tests/test_join.py | 7 | 30695 | # pylint: disable=E1103
import nose
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas.compat import lrange
import pandas.compat as compat
from pandas.tools.merge import merge, concat
from pandas.util.testing import assert_frame_equal
from pandas import DataFrame, MultiIndex, Series
import pandas._join as _join
import pandas.util.testing as tm
from pandas.tools.tests.test_merge import get_test_data, N, NGROUPS
a_ = np.array
class TestJoin(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
# aggregate multiple columns
self.df = DataFrame({'key1': get_test_data(),
'key2': get_test_data(),
'data1': np.random.randn(N),
'data2': np.random.randn(N)})
# exclude a couple keys for fun
self.df = self.df[self.df['key2'] > 1]
self.df2 = DataFrame({'key1': get_test_data(n=N // 5),
'key2': get_test_data(ngroups=NGROUPS // 2,
n=N // 5),
'value': np.random.randn(N // 5)})
index, data = tm.getMixedTypeDict()
self.target = DataFrame(data, index=index)
# Join on string value
self.source = DataFrame({'MergedA': data['A'], 'MergedD': data['D']},
index=data['C'])
def test_cython_left_outer_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
max_group = 5
ls, rs = _join.left_outer_join(left, right, max_group)
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 7, 7, 8, 8, 9, 10])
exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,
4, 5, 4, 5, 4, 5, -1, -1])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
self.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
self.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
def test_cython_right_outer_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
max_group = 5
rs, ls = _join.left_outer_join(right, left, max_group)
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
# 0 1 1 1
exp_li = a_([0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5,
# 2 2 4
6, 7, 8, 6, 7, 8, -1])
exp_ri = a_([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3,
4, 4, 4, 5, 5, 5, 6])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
self.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
self.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
def test_cython_inner_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1, 4], dtype=np.int64)
max_group = 5
ls, rs = _join.inner_join(left, right, max_group)
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 7, 7, 8, 8])
exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,
4, 5, 4, 5, 4, 5])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
self.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
self.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
def test_left_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='left')
joined_both = merge(self.df, self.df2)
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='left')
def test_right_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='right')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='right')
joined_both = merge(self.df, self.df2, how='right')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='right')
def test_full_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='outer')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='outer')
joined_both = merge(self.df, self.df2, how='outer')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='outer')
def test_inner_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='inner')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='inner')
joined_both = merge(self.df, self.df2, how='inner')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='inner')
def test_handle_overlap(self):
joined = merge(self.df, self.df2, on='key2',
suffixes=['.foo', '.bar'])
self.assertIn('key1.foo', joined)
self.assertIn('key1.bar', joined)
def test_handle_overlap_arbitrary_key(self):
joined = merge(self.df, self.df2,
left_on='key2', right_on='key1',
suffixes=['.foo', '.bar'])
self.assertIn('key1.foo', joined)
self.assertIn('key2.bar', joined)
def test_join_on(self):
target = self.target
source = self.source
merged = target.join(source, on='C')
self.assert_series_equal(merged['MergedA'], target['A'],
check_names=False)
self.assert_series_equal(merged['MergedD'], target['D'],
check_names=False)
# join with duplicates (fix regression from DataFrame/Matrix merge)
df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])
joined = df.join(df2, on='key')
expected = DataFrame({'key': ['a', 'a', 'b', 'b', 'c'],
'value': [0, 0, 1, 1, 2]})
assert_frame_equal(joined, expected)
# Test when some are missing
df_a = DataFrame([[1], [2], [3]], index=['a', 'b', 'c'],
columns=['one'])
df_b = DataFrame([['foo'], ['bar']], index=[1, 2],
columns=['two'])
df_c = DataFrame([[1], [2]], index=[1, 2],
columns=['three'])
joined = df_a.join(df_b, on='one')
joined = joined.join(df_c, on='one')
self.assertTrue(np.isnan(joined['two']['c']))
self.assertTrue(np.isnan(joined['three']['c']))
        # merge column not present
self.assertRaises(KeyError, target.join, source, on='E')
# overlap
source_copy = source.copy()
source_copy['A'] = 0
self.assertRaises(ValueError, target.join, source_copy, on='A')
def test_join_on_fails_with_different_right_index(self):
with tm.assertRaises(ValueError):
df = DataFrame({'a': np.random.choice(['m', 'f'], size=3),
'b': np.random.randn(3)})
df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10),
'b': np.random.randn(10)},
index=tm.makeCustomIndex(10, 2))
merge(df, df2, left_on='a', right_index=True)
def test_join_on_fails_with_different_left_index(self):
with tm.assertRaises(ValueError):
df = DataFrame({'a': np.random.choice(['m', 'f'], size=3),
'b': np.random.randn(3)},
index=tm.makeCustomIndex(10, 2))
df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10),
'b': np.random.randn(10)})
merge(df, df2, right_on='b', left_index=True)
def test_join_on_fails_with_different_column_counts(self):
with tm.assertRaises(ValueError):
df = DataFrame({'a': np.random.choice(['m', 'f'], size=3),
'b': np.random.randn(3)})
df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10),
'b': np.random.randn(10)},
index=tm.makeCustomIndex(10, 2))
merge(df, df2, right_on='a', left_on=['a', 'b'])
def test_join_on_fails_with_wrong_object_type(self):
# GH12081
wrongly_typed = [Series([0, 1]), 2, 'str', None, np.array([0, 1])]
df = DataFrame({'a': [1, 1]})
for obj in wrongly_typed:
with tm.assertRaisesRegexp(ValueError, str(type(obj))):
merge(obj, df, left_on='a', right_on='a')
with tm.assertRaisesRegexp(ValueError, str(type(obj))):
merge(df, obj, left_on='a', right_on='a')
def test_join_on_pass_vector(self):
expected = self.target.join(self.source, on='C')
del expected['C']
join_col = self.target.pop('C')
result = self.target.join(self.source, on=join_col)
assert_frame_equal(result, expected)
def test_join_with_len0(self):
# nothing to merge
merged = self.target.join(self.source.reindex([]), on='C')
for col in self.source:
self.assertIn(col, merged)
self.assertTrue(merged[col].isnull().all())
merged2 = self.target.join(self.source.reindex([]), on='C',
how='inner')
self.assert_index_equal(merged2.columns, merged.columns)
self.assertEqual(len(merged2), 0)
def test_join_on_inner(self):
df = DataFrame({'key': ['a', 'a', 'd', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1]}, index=['a', 'b'])
joined = df.join(df2, on='key', how='inner')
expected = df.join(df2, on='key')
expected = expected[expected['value'].notnull()]
self.assert_series_equal(joined['key'], expected['key'],
check_dtype=False)
self.assert_series_equal(joined['value'], expected['value'],
check_dtype=False)
self.assert_index_equal(joined.index, expected.index)
def test_join_on_singlekey_list(self):
df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])
# corner cases
joined = df.join(df2, on=['key'])
expected = df.join(df2, on='key')
assert_frame_equal(joined, expected)
def test_join_on_series(self):
result = self.target.join(self.source['MergedA'], on='C')
expected = self.target.join(self.source[['MergedA']], on='C')
assert_frame_equal(result, expected)
def test_join_on_series_buglet(self):
# GH #638
df = DataFrame({'a': [1, 1]})
ds = Series([2], index=[1], name='b')
result = df.join(ds, on='a')
expected = DataFrame({'a': [1, 1],
'b': [2, 2]}, index=df.index)
tm.assert_frame_equal(result, expected)
def test_join_index_mixed(self):
df1 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},
index=np.arange(10),
columns=['A', 'B', 'C', 'D'])
self.assertEqual(df1['B'].dtype, np.int64)
self.assertEqual(df1['D'].dtype, np.bool_)
df2 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},
index=np.arange(0, 10, 2),
columns=['A', 'B', 'C', 'D'])
# overlap
joined = df1.join(df2, lsuffix='_one', rsuffix='_two')
expected_columns = ['A_one', 'B_one', 'C_one', 'D_one',
'A_two', 'B_two', 'C_two', 'D_two']
df1.columns = expected_columns[:4]
df2.columns = expected_columns[4:]
expected = _join_by_hand(df1, df2)
assert_frame_equal(joined, expected)
# no overlapping blocks
df1 = DataFrame(index=np.arange(10))
df1['bool'] = True
df1['string'] = 'foo'
df2 = DataFrame(index=np.arange(5, 15))
df2['int'] = 1
df2['float'] = 1.
for kind in ['inner', 'outer', 'left', 'right']:
joined = df1.join(df2, how=kind)
expected = _join_by_hand(df1, df2, how=kind)
assert_frame_equal(joined, expected)
joined = df2.join(df1, how=kind)
expected = _join_by_hand(df2, df1, how=kind)
assert_frame_equal(joined, expected)
def test_join_empty_bug(self):
# generated an exception in 0.4.3
x = DataFrame()
x.join(DataFrame([3], index=[0], columns=['A']), how='outer')
def test_join_unconsolidated(self):
# GH #331
a = DataFrame(randn(30, 2), columns=['a', 'b'])
c = Series(randn(30))
a['c'] = c
d = DataFrame(randn(30, 1), columns=['q'])
# it works!
a.join(d)
d.join(a)
def test_join_multiindex(self):
index1 = MultiIndex.from_arrays([['a', 'a', 'a', 'b', 'b', 'b'],
[1, 2, 3, 1, 2, 3]],
names=['first', 'second'])
index2 = MultiIndex.from_arrays([['b', 'b', 'b', 'c', 'c', 'c'],
[1, 2, 3, 1, 2, 3]],
names=['first', 'second'])
df1 = DataFrame(data=np.random.randn(6), index=index1,
columns=['var X'])
df2 = DataFrame(data=np.random.randn(6), index=index2,
columns=['var Y'])
df1 = df1.sortlevel(0)
df2 = df2.sortlevel(0)
joined = df1.join(df2, how='outer')
ex_index = index1._tuple_index.union(index2._tuple_index)
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
assert_frame_equal(joined, expected)
self.assertEqual(joined.index.names, index1.names)
df1 = df1.sortlevel(1)
df2 = df2.sortlevel(1)
joined = df1.join(df2, how='outer').sortlevel(0)
ex_index = index1._tuple_index.union(index2._tuple_index)
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
assert_frame_equal(joined, expected)
self.assertEqual(joined.index.names, index1.names)
def test_join_inner_multiindex(self):
key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',
'qux', 'snap']
key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',
'three', 'one']
data = np.random.randn(len(key1))
data = DataFrame({'key1': key1, 'key2': key2,
'data': data})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
to_join = DataFrame(np.random.randn(10, 3), index=index,
columns=['j_one', 'j_two', 'j_three'])
joined = data.join(to_join, on=['key1', 'key2'], how='inner')
expected = merge(data, to_join.reset_index(),
left_on=['key1', 'key2'],
right_on=['first', 'second'], how='inner',
sort=False)
expected2 = merge(to_join, data,
right_on=['key1', 'key2'], left_index=True,
how='inner', sort=False)
assert_frame_equal(joined, expected2.reindex_like(joined))
expected2 = merge(to_join, data, right_on=['key1', 'key2'],
left_index=True, how='inner', sort=False)
expected = expected.drop(['first', 'second'], axis=1)
expected.index = joined.index
self.assertTrue(joined.index.is_monotonic)
assert_frame_equal(joined, expected)
# _assert_same_contents(expected, expected2.ix[:, expected.columns])
def test_join_hierarchical_mixed(self):
# GH 2024
df = DataFrame([(1, 2, 3), (4, 5, 6)], columns=['a', 'b', 'c'])
new_df = df.groupby(['a']).agg({'b': [np.mean, np.sum]})
other_df = DataFrame(
[(1, 2, 3), (7, 10, 6)], columns=['a', 'b', 'd'])
other_df.set_index('a', inplace=True)
# GH 9455, 12219
with tm.assert_produces_warning(UserWarning):
result = merge(new_df, other_df, left_index=True, right_index=True)
self.assertTrue(('b', 'mean') in result)
self.assertTrue('b' in result)
def test_join_float64_float32(self):
a = DataFrame(randn(10, 2), columns=['a', 'b'], dtype=np.float64)
b = DataFrame(randn(10, 1), columns=['c'], dtype=np.float32)
joined = a.join(b)
self.assertEqual(joined.dtypes['a'], 'float64')
self.assertEqual(joined.dtypes['b'], 'float64')
self.assertEqual(joined.dtypes['c'], 'float32')
a = np.random.randint(0, 5, 100).astype('int64')
b = np.random.random(100).astype('float64')
c = np.random.random(100).astype('float32')
df = DataFrame({'a': a, 'b': b, 'c': c})
xpdf = DataFrame({'a': a, 'b': b, 'c': c})
s = DataFrame(np.random.random(5).astype('float32'), columns=['md'])
rs = df.merge(s, left_on='a', right_index=True)
self.assertEqual(rs.dtypes['a'], 'int64')
self.assertEqual(rs.dtypes['b'], 'float64')
self.assertEqual(rs.dtypes['c'], 'float32')
self.assertEqual(rs.dtypes['md'], 'float32')
xp = xpdf.merge(s, left_on='a', right_index=True)
assert_frame_equal(rs, xp)
def test_join_many_non_unique_index(self):
df1 = DataFrame({"a": [1, 1], "b": [1, 1], "c": [10, 20]})
df2 = DataFrame({"a": [1, 1], "b": [1, 2], "d": [100, 200]})
df3 = DataFrame({"a": [1, 1], "b": [1, 2], "e": [1000, 2000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how='outer')
df_partially_merged = merge(df1, df2, on=['a', 'b'], how='outer')
expected = merge(df_partially_merged, df3, on=['a', 'b'], how='outer')
result = result.reset_index()
expected = expected[result.columns]
expected['a'] = expected.a.astype('int64')
expected['b'] = expected.b.astype('int64')
assert_frame_equal(result, expected)
df1 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 1], "c": [10, 20, 30]})
df2 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "d": [100, 200, 300]})
df3 = DataFrame(
{"a": [1, 1, 1], "b": [1, 1, 2], "e": [1000, 2000, 3000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how='inner')
df_partially_merged = merge(df1, df2, on=['a', 'b'], how='inner')
expected = merge(df_partially_merged, df3, on=['a', 'b'], how='inner')
result = result.reset_index()
assert_frame_equal(result, expected.ix[:, result.columns])
# GH 11519
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
s = Series(np.repeat(np.arange(8), 2),
index=np.repeat(np.arange(8), 2), name='TEST')
inner = df.join(s, how='inner')
outer = df.join(s, how='outer')
left = df.join(s, how='left')
right = df.join(s, how='right')
assert_frame_equal(inner, outer)
assert_frame_equal(inner, left)
assert_frame_equal(inner, right)
def test_join_sort(self):
left = DataFrame({'key': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 4]})
right = DataFrame({'value2': ['a', 'b', 'c']},
index=['bar', 'baz', 'foo'])
joined = left.join(right, on='key', sort=True)
expected = DataFrame({'key': ['bar', 'baz', 'foo', 'foo'],
'value': [2, 3, 1, 4],
'value2': ['a', 'b', 'c', 'c']},
index=[1, 2, 0, 3])
assert_frame_equal(joined, expected)
# smoke test
joined = left.join(right, on='key', sort=False)
self.assert_index_equal(joined.index, pd.Index(lrange(4)))
def test_join_mixed_non_unique_index(self):
# GH 12814, unorderable types in py3 with a non-unique index
df1 = DataFrame({'a': [1, 2, 3, 4]}, index=[1, 2, 3, 'a'])
df2 = DataFrame({'b': [5, 6, 7, 8]}, index=[1, 3, 3, 4])
result = df1.join(df2)
expected = DataFrame({'a': [1, 2, 3, 3, 4],
'b': [5, np.nan, 6, 7, np.nan]},
index=[1, 2, 3, 3, 'a'])
tm.assert_frame_equal(result, expected)
df3 = DataFrame({'a': [1, 2, 3, 4]}, index=[1, 2, 2, 'a'])
df4 = DataFrame({'b': [5, 6, 7, 8]}, index=[1, 2, 3, 4])
result = df3.join(df4)
expected = DataFrame({'a': [1, 2, 3, 4], 'b': [5, 6, 6, np.nan]},
index=[1, 2, 2, 'a'])
tm.assert_frame_equal(result, expected)
def test_mixed_type_join_with_suffix(self):
# GH #916
df = DataFrame(np.random.randn(20, 6),
columns=['a', 'b', 'c', 'd', 'e', 'f'])
df.insert(0, 'id', 0)
df.insert(5, 'dt', 'foo')
grouped = df.groupby('id')
mn = grouped.mean()
cn = grouped.count()
# it works!
mn.join(cn, rsuffix='_right')
def test_join_many(self):
df = DataFrame(np.random.randn(10, 6), columns=list('abcdef'))
df_list = [df[['a', 'b']], df[['c', 'd']], df[['e', 'f']]]
joined = df_list[0].join(df_list[1:])
tm.assert_frame_equal(joined, df)
df_list = [df[['a', 'b']][:-2],
df[['c', 'd']][2:], df[['e', 'f']][1:9]]
def _check_diff_index(df_list, result, exp_index):
reindexed = [x.reindex(exp_index) for x in df_list]
expected = reindexed[0].join(reindexed[1:])
tm.assert_frame_equal(result, expected)
# different join types
joined = df_list[0].join(df_list[1:], how='outer')
_check_diff_index(df_list, joined, df.index)
joined = df_list[0].join(df_list[1:])
_check_diff_index(df_list, joined, df_list[0].index)
joined = df_list[0].join(df_list[1:], how='inner')
_check_diff_index(df_list, joined, df.index[2:8])
self.assertRaises(ValueError, df_list[0].join, df_list[1:], on='a')
def test_join_many_mixed(self):
df = DataFrame(np.random.randn(8, 4), columns=['A', 'B', 'C', 'D'])
df['key'] = ['foo', 'bar'] * 4
df1 = df.ix[:, ['A', 'B']]
df2 = df.ix[:, ['C', 'D']]
df3 = df.ix[:, ['key']]
result = df1.join([df2, df3])
assert_frame_equal(result, df)
def test_join_dups(self):
# joining dups
df = concat([DataFrame(np.random.randn(10, 4),
columns=['A', 'A', 'B', 'B']),
DataFrame(np.random.randint(0, 10, size=20)
.reshape(10, 2),
columns=['A', 'C'])],
axis=1)
expected = concat([df, df], axis=1)
result = df.join(df, rsuffix='_2')
result.columns = expected.columns
assert_frame_equal(result, expected)
# GH 4975, invalid join on dups
w = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
x = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
y = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
z = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
dta = x.merge(y, left_index=True, right_index=True).merge(
z, left_index=True, right_index=True, how="outer")
dta = dta.merge(w, left_index=True, right_index=True)
expected = concat([x, y, z, w], axis=1)
expected.columns = ['x_x', 'y_x', 'x_y',
'y_y', 'x_x', 'y_x', 'x_y', 'y_y']
assert_frame_equal(dta, expected)
def test_panel_join(self):
panel = tm.makePanel()
tm.add_nans(panel)
p1 = panel.ix[:2, :10, :3]
p2 = panel.ix[2:, 5:, 2:]
# left join
result = p1.join(p2)
expected = p1.copy()
expected['ItemC'] = p2['ItemC']
tm.assert_panel_equal(result, expected)
# right join
result = p1.join(p2, how='right')
expected = p2.copy()
expected['ItemA'] = p1['ItemA']
expected['ItemB'] = p1['ItemB']
expected = expected.reindex(items=['ItemA', 'ItemB', 'ItemC'])
tm.assert_panel_equal(result, expected)
# inner join
result = p1.join(p2, how='inner')
expected = panel.ix[:, 5:10, 2:3]
tm.assert_panel_equal(result, expected)
# outer join
result = p1.join(p2, how='outer')
expected = p1.reindex(major=panel.major_axis,
minor=panel.minor_axis)
expected = expected.join(p2.reindex(major=panel.major_axis,
minor=panel.minor_axis))
tm.assert_panel_equal(result, expected)
def test_panel_join_overlap(self):
panel = tm.makePanel()
tm.add_nans(panel)
p1 = panel.ix[['ItemA', 'ItemB', 'ItemC']]
p2 = panel.ix[['ItemB', 'ItemC']]
# Expected index is
#
# ItemA, ItemB_p1, ItemC_p1, ItemB_p2, ItemC_p2
joined = p1.join(p2, lsuffix='_p1', rsuffix='_p2')
p1_suf = p1.ix[['ItemB', 'ItemC']].add_suffix('_p1')
p2_suf = p2.ix[['ItemB', 'ItemC']].add_suffix('_p2')
no_overlap = panel.ix[['ItemA']]
expected = no_overlap.join(p1_suf.join(p2_suf))
tm.assert_panel_equal(joined, expected)
def test_panel_join_many(self):
tm.K = 10
panel = tm.makePanel()
tm.K = 4
panels = [panel.ix[:2], panel.ix[2:6], panel.ix[6:]]
joined = panels[0].join(panels[1:])
tm.assert_panel_equal(joined, panel)
panels = [panel.ix[:2, :-5], panel.ix[2:6, 2:], panel.ix[6:, 5:-7]]
data_dict = {}
for p in panels:
data_dict.update(p.iteritems())
joined = panels[0].join(panels[1:], how='inner')
expected = pd.Panel.from_dict(data_dict, intersect=True)
tm.assert_panel_equal(joined, expected)
joined = panels[0].join(panels[1:], how='outer')
expected = pd.Panel.from_dict(data_dict, intersect=False)
tm.assert_panel_equal(joined, expected)
# edge cases
self.assertRaises(ValueError, panels[0].join, panels[1:],
how='outer', lsuffix='foo', rsuffix='bar')
self.assertRaises(ValueError, panels[0].join, panels[1:],
how='right')
def _check_join(left, right, result, join_col, how='left',
lsuffix='_x', rsuffix='_y'):
# some smoke tests
for c in join_col:
assert(result[c].notnull().all())
left_grouped = left.groupby(join_col)
right_grouped = right.groupby(join_col)
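# Added explanatory comment: for every key group that appears in the joined
# result, the columns that came from ``left``/``right`` must either match the
# corresponding group of the original frame, or be entirely NaN when that key
# is absent from that side; for 'left'/'inner' joins a key missing from the
# left frame must not appear in the result at all (checked below).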
for group_key, group in result.groupby(join_col):
l_joined = _restrict_to_columns(group, left.columns, lsuffix)
r_joined = _restrict_to_columns(group, right.columns, rsuffix)
try:
lgroup = left_grouped.get_group(group_key)
except KeyError:
if how in ('left', 'inner'):
raise AssertionError('key %s should not have been in the join'
% str(group_key))
_assert_all_na(l_joined, left.columns, join_col)
else:
_assert_same_contents(l_joined, lgroup)
try:
rgroup = right_grouped.get_group(group_key)
except KeyError:
if how in ('right', 'inner'):
raise AssertionError('key %s should not have been in the join'
% str(group_key))
_assert_all_na(r_joined, right.columns, join_col)
else:
_assert_same_contents(r_joined, rgroup)
def _restrict_to_columns(group, columns, suffix):
found = [c for c in group.columns
if c in columns or c.replace(suffix, '') in columns]
# filter
group = group.ix[:, found]
# get rid of suffixes, if any
group = group.rename(columns=lambda x: x.replace(suffix, ''))
# put in the right order...
group = group.ix[:, columns]
return group
def _assert_same_contents(join_chunk, source):
NA_SENTINEL = -1234567 # drop_duplicates not so NA-friendly...
jvalues = join_chunk.fillna(NA_SENTINEL).drop_duplicates().values
svalues = source.fillna(NA_SENTINEL).drop_duplicates().values
rows = set(tuple(row) for row in jvalues)
assert(len(rows) == len(source))
assert(all(tuple(row) in rows for row in svalues))
def _assert_all_na(join_chunk, source_columns, join_col):
for c in source_columns:
if c in join_col:
continue
assert(join_chunk[c].isnull().all())
def _join_by_hand(a, b, how='left'):
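# Build the expected result by hand: align both frames on the joined index,
# then copy b's columns onto a's reindexed copy, keeping the original
# column order.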
join_index = a.index.join(b.index, how=how)
a_re = a.reindex(join_index)
b_re = b.reindex(join_index)
result_columns = a.columns.append(b.columns)
for col, s in compat.iteritems(b_re):
a_re[col] = s
return a_re.reindex(columns=result_columns)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
michigraber/scikit-learn | examples/mixture/plot_gmm_classifier.py | 250 | 3918 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
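# np.linalg.eigh on the 2x2 covariance block yields the variances along the
# principal axes (``v``) and an orthonormal basis (``w``) from which the
# ellipse orientation angle is derived below.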
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
h = plt.subplot(2, n_classifiers / 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
rvraghav93/scikit-learn | examples/decomposition/plot_incremental_pca.py | 175 | 1974 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
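# The same low-rank projection could also be built chunk by chunk via
# ``partial_fit`` (illustrative sketch, not needed for this small example;
# ``ipca_chunked`` / ``X_ipca_chunked`` are placeholder names):
#
# ipca_chunked = IncrementalPCA(n_components=n_components)
# for chunk in np.array_split(X, 15):
# ipca_chunked.partial_fit(chunk)
# X_ipca_chunked = ipca_chunked.transform(X)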
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
colors = ['navy', 'turquoise', 'darkorange']
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for color, i, target_name in zip(colors, [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
color=color, lw=2, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best", shadow=False, scatterpoints=1)
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| bsd-3-clause |
nhzandi/openface | util/detect-outliers.py | 10 | 2768 | #!/usr/bin/env python2
#
# Detect outlier faces (not of the same person) in a directory
# of aligned images.
# Brandon Amos
# 2016/02/14
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
start = time.time()
import argparse
import os
import glob
import numpy as np
np.set_printoptions(precision=2)
from sklearn.metrics.pairwise import euclidean_distances
import cv2
import openface
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
openfaceModelDir = os.path.join(modelDir, 'openface')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--networkModel', type=str, help="Path to Torch network model.",
default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int,
help="Default image dimension.", default=96)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--threshold', type=float, default=0.9)
parser.add_argument('--delete', action='store_true', help='Delete the outliers.')
parser.add_argument('directory')
args = parser.parse_args()
net = openface.TorchNeuralNet(args.networkModel, args.imgDim, cuda=args.cuda)
reps = []
paths = sorted(list(glob.glob(os.path.join(args.directory, '*.png'))))
print("=== {} ===".format(args.directory))
for imgPath in paths:
if cv2.imread(imgPath) is None:
print("Warning: Skipping bad image file: {}".format(imgPath))
if args.delete:
# Remove the file if it's not a valid image.
os.remove(imgPath)
else:
reps.append(net.forwardPath(imgPath))
mean = np.mean(reps, axis=0)
dists = euclidean_distances(reps, mean)
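# Outliers are flagged by simple centroid thresholding: each face embedding's
# Euclidean distance to the mean representation is compared against
# ``args.threshold`` below.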
outliers = []
for path, dist in zip(paths, dists):
dist = dist.take(0)
if dist > args.threshold:
outliers.append((path, dist))
print("Found {} outlier(s) from {} images.".format(len(outliers), len(paths)))
for path, dist in outliers:
print(" + {} ({:0.2f})".format(path, dist))
if args.delete:
os.remove(path)
if __name__ == '__main__':
main()
| apache-2.0 |
gfyoung/pandas | asv_bench/benchmarks/dtypes.py | 2 | 3456 | import string
import numpy as np
from pandas import DataFrame
import pandas._testing as tm
from pandas.api.types import pandas_dtype
from .pandas_vb_common import (
datetime_dtypes,
extension_dtypes,
lib,
numeric_dtypes,
string_dtypes,
)
_numpy_dtypes = [
np.dtype(dtype) for dtype in (numeric_dtypes + datetime_dtypes + string_dtypes)
]
_dtypes = _numpy_dtypes + extension_dtypes
class Dtypes:
params = _dtypes + list(map(lambda dt: dt.name, _dtypes))
param_names = ["dtype"]
def time_pandas_dtype(self, dtype):
pandas_dtype(dtype)
class DtypesInvalid:
param_names = ["dtype"]
params = ["scalar-string", "scalar-int", "list-string", "array-string"]
data_dict = {
"scalar-string": "foo",
"scalar-int": 1,
"list-string": ["foo"] * 1000,
"array-string": np.array(["foo"] * 1000),
}
def time_pandas_dtype_invalid(self, dtype):
try:
pandas_dtype(self.data_dict[dtype])
except TypeError:
pass
class InferDtypes:
param_names = ["dtype"]
data_dict = {
"np-object": np.array([1] * 100000, dtype="O"),
"py-object": [1] * 100000,
"np-null": np.array([1] * 50000 + [np.nan] * 50000),
"py-null": [1] * 50000 + [None] * 50000,
"np-int": np.array([1] * 100000, dtype=int),
"np-floating": np.array([1.0] * 100000, dtype=float),
"empty": [],
"bytes": [b"a"] * 100000,
}
params = list(data_dict.keys())
def time_infer_skipna(self, dtype):
lib.infer_dtype(self.data_dict[dtype], skipna=True)
def time_infer(self, dtype):
lib.infer_dtype(self.data_dict[dtype], skipna=False)
class SelectDtypes:
params = [
tm.ALL_INT_DTYPES
+ tm.ALL_EA_INT_DTYPES
+ tm.FLOAT_DTYPES
+ tm.COMPLEX_DTYPES
+ tm.DATETIME64_DTYPES
+ tm.TIMEDELTA64_DTYPES
+ tm.BOOL_DTYPES
]
param_names = ["dtype"]
def setup(self, dtype):
N, K = 5000, 50
self.index = tm.makeStringIndex(N)
self.columns = tm.makeStringIndex(K)
def create_df(data):
return DataFrame(data, index=self.index, columns=self.columns)
self.df_int = create_df(np.random.randint(low=100, size=(N, K)))
self.df_float = create_df(np.random.randn(N, K))
self.df_bool = create_df(np.random.choice([True, False], size=(N, K)))
self.df_string = create_df(
np.random.choice(list(string.ascii_letters), size=(N, K))
)
def time_select_dtype_int_include(self, dtype):
self.df_int.select_dtypes(include=dtype)
def time_select_dtype_int_exclude(self, dtype):
self.df_int.select_dtypes(exclude=dtype)
def time_select_dtype_float_include(self, dtype):
self.df_float.select_dtypes(include=dtype)
def time_select_dtype_float_exclude(self, dtype):
self.df_float.select_dtypes(exclude=dtype)
def time_select_dtype_bool_include(self, dtype):
self.df_bool.select_dtypes(include=dtype)
def time_select_dtype_bool_exclude(self, dtype):
self.df_bool.select_dtypes(exclude=dtype)
def time_select_dtype_string_include(self, dtype):
self.df_string.select_dtypes(include=dtype)
def time_select_dtype_string_exclude(self, dtype):
self.df_string.select_dtypes(exclude=dtype)
from .pandas_vb_common import setup # noqa: F401 isort:skip
| bsd-3-clause |
wkfwkf/statsmodels | statsmodels/sandbox/stats/stats_dhuard.py | 33 | 10184 | '''
from David Huard's scipy sandbox, also attached to a ticket and
in the matplotlib-user mailinglist (links ???)
Notes
=====
out of bounds interpolation raises exception and wouldn't be completely
defined ::
>>> scoreatpercentile(x, [0,25,50,100])
Traceback (most recent call last):
...
raise ValueError("A value in x_new is below the interpolation "
ValueError: A value in x_new is below the interpolation range.
>>> percentileofscore(x, [-50, 50])
Traceback (most recent call last):
...
raise ValueError("A value in x_new is below the interpolation "
ValueError: A value in x_new is below the interpolation range.
idea
====
histogram and empirical interpolated distribution
-------------------------------------------------
dual constructor
* empirical cdf : cdf on all observations through linear interpolation
* binned cdf : based on histogram
both should work essentially the same, although pdf of empirical has
many spikes, fluctuates a lot
- alternative: binning based on interpolated cdf : example in script
* ppf: quantileatscore based on interpolated cdf
* rvs : generic from ppf
* stats, expectation ? how does integration wrt cdf work - theory?
Problems
* limits, lower and upper bound of support
does not work or is undefined with empirical cdf and interpolation
* extending bounds ?
matlab has pareto tails for empirical distribution, breaks linearity
empirical distribution with higher order interpolation
------------------------------------------------------
* should work easily enough with interpolating splines
* not piecewise linear
* can use pareto (or other) tails
* ppf how do I get the inverse function of a higher order spline?
Chuck: resample and fit spline to inverse function
this will have an approximation error in the inverse function
* -> doesn't work: higher order spline doesn't preserve monotonicity
see mailing list for response to my question
* pmf from derivative available in spline
-> forget this and use kernel density estimator instead
bootstrap/empirical distribution:
---------------------------------
discrete distribution on real line given observations
what's defined?
* cdf : step function
* pmf : points with equal weight 1/nobs
* rvs : resampling
* ppf : quantileatscore on sample?
* moments : from data ?
* expectation ? sum_{all observations x} [func(x) * pmf(x)]
* similar for discrete distribution on real line
* References : ?
* what's the point? most of it is trivial, just for the record ?
Created on Monday, May 03, 2010, 11:47:03 AM
Author: josef-pktd, parts based on David Huard
License: BSD
'''
from __future__ import print_function
import scipy.interpolate as interpolate
import numpy as np
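# Illustrative sketch (not part of the original notes; names are placeholders):
# with an interpolated ppf such as ``HistDist.ppfintp`` below, generic rvs can
# be drawn by inverse transform sampling, i.e. evaluating the ppf at uniform
# draws over the range covered by the empirical cdf:
#
# u = np.random.uniform(cdf_low, cdf_high, size=1000)
# samples = histd.ppfintp(u)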
def scoreatpercentile(data, percentile):
"""Return the score at the given percentile of the data.
Example:
>>> data = randn(100)
>>> scoreatpercentile(data, 50)
will return the median of sample `data`.
"""
per = np.array(percentile)
cdf = empiricalcdf(data)
interpolator = interpolate.interp1d(np.sort(cdf), np.sort(data))
return interpolator(per/100.)
def percentileofscore(data, score):
"""Return the percentile-position of score relative to data.
score: Array of scores at which the percentile is computed.
Return percentiles (0-100).
Example
r = randn(50)
x = linspace(-2,2,100)
percentileofscore(r,x)
Raise an error if the score is outside the range of data.
"""
cdf = empiricalcdf(data)
interpolator = interpolate.interp1d(np.sort(data), np.sort(cdf))
return interpolator(score)*100.
def empiricalcdf(data, method='Hazen'):
"""Return the empirical cdf.
Methods available:
Hazen: (i-0.5)/N
Weibull: i/(N+1)
Chegodayev: (i-.3)/(N+.4)
Cunnane: (i-.4)/(N+.2)
Gringorten: (i-.44)/(N+.12)
California: (i-1)/N
Where i goes from 1 to N.
"""
i = np.argsort(np.argsort(data)) + 1.
N = len(data)
method = method.lower()
if method == 'hazen':
cdf = (i-0.5)/N
elif method == 'weibull':
cdf = i/(N+1.)
elif method == 'california':
cdf = (i-1.)/N
elif method == 'chegodayev':
cdf = (i-.3)/(N+.4)
elif method == 'cunnane':
cdf = (i-.4)/(N+.2)
elif method == 'gringorten':
cdf = (i-.44)/(N+.12)
else:
raise ValueError('Unknown method. Choose among Weibull, Hazen, '
'Chegodayev, Cunnane, Gringorten and California.')
return cdf
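# Worked example (illustrative): for data = [3., 1., 4., 2.] the ranks are
# i = [3, 1, 4, 2], so the Hazen positions (i - 0.5) / N evaluate to
# [0.625, 0.125, 0.875, 0.375].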
class HistDist(object):
'''Distribution with piecewise linear cdf, pdf is step function
can be created from empiricial distribution or from a histogram (not done yet)
work in progress, not finished
'''
def __init__(self, data):
self.data = np.atleast_1d(data)
self.binlimit = np.array([self.data.min(), self.data.max()])
sortind = np.argsort(data)
self._datasorted = data[sortind]
self.ranking = np.argsort(sortind)
cdf = self.empiricalcdf()
self._empcdfsorted = np.sort(cdf)
self.cdfintp = interpolate.interp1d(self._datasorted, self._empcdfsorted)
self.ppfintp = interpolate.interp1d(self._empcdfsorted, self._datasorted)
def empiricalcdf(self, data=None, method='Hazen'):
"""Return the empirical cdf.
Methods available:
Hazen: (i-0.5)/N
Weibull: i/(N+1)
Chegodayev: (i-.3)/(N+.4)
Cunnane: (i-.4)/(N+.2)
Gringorten: (i-.44)/(N+.12)
California: (i-1)/N
Where i goes from 1 to N.
"""
if data is None:
data = self.data
i = self.ranking
else:
i = np.argsort(np.argsort(data)) + 1.
N = len(data)
method = method.lower()
if method == 'hazen':
cdf = (i-0.5)/N
elif method == 'weibull':
cdf = i/(N+1.)
elif method == 'california':
cdf = (i-1.)/N
elif method == 'chegodayev':
cdf = (i-.3)/(N+.4)
elif method == 'cunnane':
cdf = (i-.4)/(N+.2)
elif method == 'gringorten':
cdf = (i-.44)/(N+.12)
else:
raise ValueError('Unknown method. Choose among Weibull, Hazen, '
'Chegodayev, Cunnane, Gringorten and California.')
return cdf
def cdf_emp(self, score):
'''
this is score in dh
'''
return self.cdfintp(score)
#return percentileofscore(self.data, score)
def ppf_emp(self, quantile):
'''
this is score in dh
'''
return self.ppfintp(quantile)
#return scoreatpercentile(self.data, quantile*100)
#from DHuard http://old.nabble.com/matplotlib-f2903.html
def optimize_binning(self, method='Freedman'):
"""Find the optimal number of bins and update the bin countaccordingly.
Available methods : Freedman
Scott
"""
nobs = len(self.data)
if method=='Freedman':
IQR = self.ppf_emp(0.75) - self.ppf_emp(0.25) # Interquartile range (75% - 25%)
width = 2* IQR* nobs**(-1./3)
elif method=='Scott':
width = 3.49 * np.std(self.data) * nobs**(-1./3)
self.nbin = (self.binlimit.ptp()/width)
return self.nbin
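# Note: ``nbin`` is stored and returned as a float (data range divided by the
# rule's bin width); a caller would typically round it, e.g.
# ``int(np.ceil(histd.nbin))`` for an instance ``histd``.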
#changes: josef-pktd
if __name__ == '__main__':
import matplotlib.pyplot as plt
nobs = 100
x = np.random.randn(nobs)
examples = [2]
if 1 in examples:
empiricalcdf(x)
print(percentileofscore(x, 0.5))
print(scoreatpercentile(x, 50))
import matplotlib.pyplot as plt
xsupp = np.linspace(x.min(), x.max())
pos = percentileofscore(x, xsupp)
plt.plot(xsupp, pos)
#perc = np.linspace(2.5, 97.5)
#plt.plot(scoreatpercentile(x, perc), perc)
plt.plot(scoreatpercentile(x, pos), pos+1)
#emp = interpolate.PiecewisePolynomial(np.sort(empiricalcdf(x)), np.sort(x))
emp=interpolate.InterpolatedUnivariateSpline(np.sort(x),np.sort(empiricalcdf(x)),k=1)
pdfemp = np.array([emp.derivatives(xi)[1] for xi in xsupp])
plt.figure()
plt.plot(xsupp,pdfemp)
cdf_ongrid = emp(xsupp)
plt.figure()
plt.plot(xsupp, cdf_ongrid)
#get pdf from interpolated cdf on a regular grid
plt.figure()
plt.step(xsupp[:-1],np.diff(cdf_ongrid)/np.diff(xsupp))
#reduce number of bins/steps
xsupp2 = np.linspace(x.min(), x.max(), 25)
plt.figure()
plt.step(xsupp2[:-1],np.diff(emp(xsupp2))/np.diff(xsupp2))
#pdf using 25 original observations, every (nobs/25)th
xso = np.sort(x)
xs = xso[::nobs/25]
plt.figure()
plt.step(xs[:-1],np.diff(emp(xs))/np.diff(xs))
#lower end looks strange
histd = HistDist(x)
print(histd.optimize_binning())
print(histd.cdf_emp(histd.binlimit))
print(histd.ppf_emp([0.25, 0.5, 0.75]))
print(histd.cdf_emp([-0.5, -0.25, 0, 0.25, 0.5]))
xsupp = np.linspace(x.min(), x.max(), 500)
emp=interpolate.InterpolatedUnivariateSpline(np.sort(x),np.sort(empiricalcdf(x)),k=1)
#pdfemp = np.array([emp.derivatives(xi)[1] for xi in xsupp])
#plt.figure()
#plt.plot(xsupp,pdfemp)
cdf_ongrid = emp(xsupp)
plt.figure()
plt.plot(xsupp, cdf_ongrid)
ppfintp = interpolate.InterpolatedUnivariateSpline(cdf_ongrid,xsupp,k=3)
ppfs = ppfintp(cdf_ongrid)
plt.plot(ppfs, cdf_ongrid)
#ppfemp=interpolate.InterpolatedUnivariateSpline(np.sort(empiricalcdf(x)),np.sort(x),k=3)
#Don't use interpolating splines for function approximation
#with s=0.03 the spline is monotonic at the evaluated values
ppfemp=interpolate.UnivariateSpline(np.sort(empiricalcdf(x)),np.sort(x),k=3, s=0.03)
ppfe = ppfemp(cdf_ongrid)
plt.plot(ppfe, cdf_ongrid)
print('negative density')
print('(np.diff(ppfs)).min()', (np.diff(ppfs)).min())
print('(np.diff(cdf_ongrid)).min()', (np.diff(cdf_ongrid)).min())
#plt.show()
| bsd-3-clause |
treycausey/scikit-learn | sklearn/utils/testing.py | 2 | 19145 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import sklearn
from sklearn.base import BaseEstimator
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regexp
except ImportError:
# for Py 2.6
def assert_raises_regexp(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except Exception as e:
error_message = str(e)
if not re.compile(expected_regexp).match(error_message):
raise AssertionError("Error message should match pattern "
"'%s'. '%s' does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("Should have raised %r" %
expected_exception(expected_regexp))
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
# To remove when we support numpy 1.7
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
if not w[0].category is warning_class:
raise AssertionError("First warning for %s is not a "
"%s( is %s)"
% (func.__name__, warning_class, w[0]))
return result
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
if not w[0].category is warning_class:
raise AssertionError("First warning for %s is not a "
"%s( is %s)"
% (func.__name__, warning_class, w[0]))
# substring will match, the entire message with typo won't
msg = w[0].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if not check_in_message(msg):
raise AssertionError("The message received ('%s') for <%s> is "
"not the one you expected ('%s')"
% (msg, func.__name__, message
))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note: using this (in both variants) will clear all warnings
from all Python modules loaded. If you need to test
cross-module warning logging, this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exception, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions"""
try:
function(*args, **kwargs)
raise AssertionError("Should have raised %r" % exception(message))
except exception as e:
error_message = str(e)
assert_in(message, error_message)
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict: contains data as
columns_dict[column_name] = array of data
dataname: name of data set
matfile: file-like object or file name
ordering: list of column_names, determines the ordering in the data set
Note: this function transposes all arrays, while fetch_mldata only
transposes 'data'; keep that in mind in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a BytesIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
meta_estimators = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
other = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV"]
def all_estimators(include_meta_estimators=False, include_other=False,
type_filter=None):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
Whether to include meta-estimators that are somehow special and cannot
be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
type_filter : string or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_other:
estimators = [c for c in estimators if not c[0] in other]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in meta_estimators]
if type_filter == 'classifier':
estimators = [est for est in estimators
if issubclass(est[1], ClassifierMixin)]
elif type_filter == 'regressor':
estimators = [est for est in estimators
if issubclass(est[1], RegressorMixin)]
elif type_filter == 'transformer':
estimators = [est for est in estimators
if issubclass(est[1], TransformerMixin)]
elif type_filter == 'cluster':
estimators = [est for est in estimators
if issubclass(est[1], ClusterMixin)]
elif type_filter is not None:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# We sort in order to have reproducible test failures
return sorted(estimators)
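# Example usage (illustrative):
# classifiers = all_estimators(type_filter='classifier')
# names = [name for name, cls in classifiers]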
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
matplotlib.pylab.figure()
except:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warniings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod in sys.modules.values():
if hasattr(mod, reg):
getattr(mod, reg).clear()
| bsd-3-clause |
rssalessio/PythonVRFT | examples/example2.py | 1 | 2974 | # Copyright (c) [2021] Alessio Russo [[email protected]]. All rights reserved.
# This file is part of PythonVRFT.
# PythonVRFT is free software: you can redistribute it and/or modify
# it under the terms of the MIT License. You should have received a copy of
# the MIT License along with PythonVRFT.
# If not, see <https://opensource.org/licenses/MIT>.
#
# Code author: [Alessio Russo - [email protected]]
# Last update: 10th January 2021, by [email protected]
#
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as scipysig
from vrft import *
# Example 2
# ------------
# In this example we see how to apply VRFT to a simple SISO model
# with colored measurement noise (no instrumental variables)
# Input data is generated using random normal noise
#
def generateNoise(t):
# Generate colored noise
omega = 2*np.pi*100
xi = 0.9
dt = t[1] - t[0]
noise = np.random.normal(0,0.1,t.size)
tf = scipysig.TransferFunction([10*omega**2], [1, 2*xi*omega, omega**2])
# Second order system
_, yn, _ = scipysig.lsim(tf, noise, t)
return yn
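# The white Gaussian noise is shaped ("colored") by a continuous-time
# second-order low-pass filter with natural frequency ``omega``, damping
# ratio ``xi`` and a DC gain of 10, so successive noise samples are
# correlated rather than independent.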
#Generate time and u(t) signals
t_start = 0
t_end = 10
t_step = 1e-2
t = np.arange(t_start, t_end, t_step)
u = np.random.normal(size=t.size)
#Experiment
num = [0.5]
den = [1, -0.9]
sys = ExtendedTF(num, den, dt=t_step)
t, y = scipysig.dlsim(sys, u, t)
y = y.flatten() + generateNoise(t)
data = iddata(y, u, t_step, [0])
#Reference Model
refModel = ExtendedTF([0.6], [1, -0.4], dt=t_step)
#PI Controller
base = [ExtendedTF([1], [1, -1], dt=t_step),
ExtendedTF([1, 0], [1, -1], dt=t_step)]
#Experiment filter
L = refModel * (1 - refModel)
#VRFT
theta, r, loss, C = compute_vrft(data, refModel, base, L)
#Obtained controller
print("Controller: {}".format(C))
L = (C * sys).feedback()
print("Theta: {}".format(theta))
print(scipysig.ZerosPolesGain(L))
#Analysis
t = t[:len(r)]
u = np.ones(len(t))
_, yr = scipysig.dlsim(refModel, u, t)
_, yc = scipysig.dlsim(L, u, t)
_, ys = scipysig.dlsim(sys, u, t)
yr = np.array(yr).flatten()
ys = np.array(ys).flatten()
yc = np.array(yc).flatten()
fig, ax = plt.subplots(4, sharex=True, figsize=(12,8), dpi= 100, facecolor='w', edgecolor='k')
ax[0].plot(t, yr,label='Reference System')
ax[0].plot(t, yc, label='CL System')
ax[0].set_title('Systems response')
ax[0].grid(True)
ax[1].plot(t, ys, label='OL System')
ax[1].set_title('OL Systems response')
ax[1].grid(True)
ax[2].plot(t, y[:len(r)])
ax[2].grid(True)
ax[2].set_title('Experiment data')
ax[3].plot(t, r)
ax[3].grid(True)
ax[3].set_title('Virtual Reference')
# Now add the legend with some customizations.
legend = ax[0].legend(loc='lower right', shadow=True)
# The frame is matplotlib.patches.Rectangle instance surrounding the legend.
frame = legend.get_frame()
frame.set_facecolor('0.90')
# Set the fontsize
for label in legend.get_texts():
label.set_fontsize('large')
for label in legend.get_lines():
label.set_linewidth(1.5) # the legend line width
plt.show()
| gpl-3.0 |
grlee77/scipy | scipy/stats/tests/test_morestats.py | 2 | 103011 | # Author: Travis Oliphant, 2002
#
# Further enhancements and tests added by numerous SciPy developers.
#
import warnings
import numpy as np
from numpy.random import RandomState
from numpy.testing import (assert_array_equal,
assert_almost_equal, assert_array_less, assert_array_almost_equal,
assert_, assert_allclose, assert_equal, suppress_warnings)
import pytest
from pytest import raises as assert_raises
from scipy import optimize
from scipy import stats
from scipy.stats.morestats import _abw_state
from .common_tests import check_named_results
from .._hypotests import _get_wilcoxon_distr
from scipy.stats._binomtest import _binary_search_for_binom_tst
# Matplotlib is not a scipy dependency but is optionally used in probplot, so
# check if it's available
try:
import matplotlib # type: ignore[import]
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt # type: ignore[import]
have_matplotlib = True
except Exception:
have_matplotlib = False
# test data gear.dat from NIST for Levene and Bartlett test
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda3581.htm
g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000]
g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988]
g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996]
g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996]
g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996]
g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996]
g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002]
g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006]
g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991]
g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997]
class TestBayes_mvs:
def test_basic(self):
# Expected values in this test simply taken from the function. For
# some checks regarding correctness of implementation, see review in
# gh-674
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.bayes_mvs(data)
assert_almost_equal(mean.statistic, 9.0)
assert_allclose(mean.minmax, (7.1036502226125329, 10.896349777387467),
rtol=1e-14)
assert_almost_equal(var.statistic, 10.0)
assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018),
rtol=1e-09)
assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14)
assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312),
rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.bayes_mvs, [])
def test_result_attributes(self):
x = np.arange(15)
attributes = ('statistic', 'minmax')
res = stats.bayes_mvs(x)
for i in res:
check_named_results(i, attributes)
class TestMvsdist:
def test_basic(self):
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.mvsdist(data)
assert_almost_equal(mean.mean(), 9.0)
assert_allclose(mean.interval(0.9), (7.1036502226125329,
10.896349777387467), rtol=1e-14)
assert_almost_equal(var.mean(), 10.0)
assert_allclose(var.interval(0.9), (3.1767242068607087,
24.45910381334018), rtol=1e-09)
assert_almost_equal(std.mean(), 2.9724954732045084, decimal=14)
assert_allclose(std.interval(0.9), (1.7823367265645145,
4.9456146050146312), rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.mvsdist, [])
def test_bad_arg(self):
# Raise ValueError if fewer than two data points are given.
data = [1]
assert_raises(ValueError, stats.mvsdist, data)
def test_warns(self):
# regression test for gh-5270
# make sure there are no spurious divide-by-zero warnings
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
[x.mean() for x in stats.mvsdist([1, 2, 3])]
[x.mean() for x in stats.mvsdist([1, 2, 3, 4, 5])]
class TestShapiro:
def test_basic(self):
x1 = [0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
4.43, 0.21, 4.75, 0.71, 1.52, 3.24,
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]
w, pw = stats.shapiro(x1)
shapiro_test = stats.shapiro(x1)
assert_almost_equal(w, 0.90047299861907959, decimal=6)
assert_almost_equal(shapiro_test.statistic, 0.90047299861907959, decimal=6)
assert_almost_equal(pw, 0.042089745402336121, decimal=6)
assert_almost_equal(shapiro_test.pvalue, 0.042089745402336121, decimal=6)
x2 = [1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
3.48, 1.10, 0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
0.08, 3.67, 2.81, 3.49]
w, pw = stats.shapiro(x2)
shapiro_test = stats.shapiro(x2)
assert_almost_equal(w, 0.9590270, decimal=6)
assert_almost_equal(shapiro_test.statistic, 0.9590270, decimal=6)
assert_almost_equal(pw, 0.52460, decimal=3)
assert_almost_equal(shapiro_test.pvalue, 0.52460, decimal=3)
# Verified against R
x3 = stats.norm.rvs(loc=5, scale=3, size=100, random_state=12345678)
w, pw = stats.shapiro(x3)
shapiro_test = stats.shapiro(x3)
assert_almost_equal(w, 0.9772805571556091, decimal=6)
assert_almost_equal(shapiro_test.statistic, 0.9772805571556091, decimal=6)
assert_almost_equal(pw, 0.08144091814756393, decimal=3)
assert_almost_equal(shapiro_test.pvalue, 0.08144091814756393, decimal=3)
# Extracted from original paper
x4 = [0.139, 0.157, 0.175, 0.256, 0.344, 0.413, 0.503, 0.577, 0.614,
0.655, 0.954, 1.392, 1.557, 1.648, 1.690, 1.994, 2.174, 2.206,
3.245, 3.510, 3.571, 4.354, 4.980, 6.084, 8.351]
W_expected = 0.83467
p_expected = 0.000914
w, pw = stats.shapiro(x4)
shapiro_test = stats.shapiro(x4)
assert_almost_equal(w, W_expected, decimal=4)
assert_almost_equal(shapiro_test.statistic, W_expected, decimal=4)
assert_almost_equal(pw, p_expected, decimal=5)
assert_almost_equal(shapiro_test.pvalue, p_expected, decimal=5)
def test_2d(self):
x1 = [[0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
4.43, 0.21, 4.75], [0.71, 1.52, 3.24,
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]]
w, pw = stats.shapiro(x1)
shapiro_test = stats.shapiro(x1)
assert_almost_equal(w, 0.90047299861907959, decimal=6)
assert_almost_equal(shapiro_test.statistic, 0.90047299861907959, decimal=6)
assert_almost_equal(pw, 0.042089745402336121, decimal=6)
assert_almost_equal(shapiro_test.pvalue, 0.042089745402336121, decimal=6)
x2 = [[1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
3.48, 1.10], [0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
0.08, 3.67, 2.81, 3.49]]
w, pw = stats.shapiro(x2)
shapiro_test = stats.shapiro(x2)
assert_almost_equal(w, 0.9590270, decimal=6)
assert_almost_equal(shapiro_test.statistic, 0.9590270, decimal=6)
assert_almost_equal(pw, 0.52460, decimal=3)
assert_almost_equal(shapiro_test.pvalue, 0.52460, decimal=3)
def test_empty_input(self):
assert_raises(ValueError, stats.shapiro, [])
assert_raises(ValueError, stats.shapiro, [[], [], []])
def test_not_enough_values(self):
assert_raises(ValueError, stats.shapiro, [1, 2])
assert_raises(ValueError, stats.shapiro, np.array([[], [2]], dtype=object))
def test_bad_arg(self):
# Length of x is less than 3.
x = [1]
assert_raises(ValueError, stats.shapiro, x)
def test_nan_input(self):
x = np.arange(10.)
x[9] = np.nan
w, pw = stats.shapiro(x)
shapiro_test = stats.shapiro(x)
assert_equal(w, np.nan)
assert_equal(shapiro_test.statistic, np.nan)
assert_almost_equal(pw, 1.0)
assert_almost_equal(shapiro_test.pvalue, 1.0)
class TestAnderson:
def test_normal(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A, crit, sig = stats.anderson(x1)
assert_array_less(crit[:-1], A)
A, crit, sig = stats.anderson(x2)
assert_array_less(A, crit[-2:])
v = np.ones(10)
v[0] = 0
A, crit, sig = stats.anderson(v)
# The expected statistic 3.208057 was computed independently of scipy.
# For example, in R:
# > library(nortest)
# > v <- rep(1, 10)
# > v[1] <- 0
# > result <- ad.test(v)
# > result$statistic
# A
# 3.208057
assert_allclose(A, 3.208057)
def test_expon(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A, crit, sig = stats.anderson(x1, 'expon')
assert_array_less(A, crit[-2:])
with np.errstate(all='ignore'):
A, crit, sig = stats.anderson(x2, 'expon')
assert_(A > crit[-1])
def test_gumbel(self):
# Regression test for gh-6306. Before that issue was fixed,
# this case would return a2=inf.
v = np.ones(100)
v[0] = 0.0
a2, crit, sig = stats.anderson(v, 'gumbel')
# A brief reimplementation of the calculation of the statistic.
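# A^2 = -n - (1/n) * sum_{i=1..n} (2*i - 1) * (log F(x_(i)) + log(1 - F(x_(n+1-i)))),
# with F the fitted gumbel_l CDF and x_(1) <= ... <= x_(n) the ordered sample
# (here ``v`` is already sorted).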
n = len(v)
xbar, s = stats.gumbel_l.fit(v)
logcdf = stats.gumbel_l.logcdf(v, xbar, s)
logsf = stats.gumbel_l.logsf(v, xbar, s)
i = np.arange(1, n+1)
expected_a2 = -n - np.mean((2*i - 1) * (logcdf + logsf[::-1]))
assert_allclose(a2, expected_a2)
def test_bad_arg(self):
assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp')
def test_result_attributes(self):
rs = RandomState(1234567890)
x = rs.standard_exponential(size=50)
res = stats.anderson(x)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
def test_gumbel_l(self):
# gh-2592, gh-6337
# Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
rs = RandomState(1234567890)
x = rs.gumbel(size=100)
A1, crit1, sig1 = stats.anderson(x, 'gumbel')
A2, crit2, sig2 = stats.anderson(x, 'gumbel_l')
assert_allclose(A2, A1)
def test_gumbel_r(self):
# gh-2592, gh-6337
# Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
rs = RandomState(1234567890)
x1 = rs.gumbel(size=100)
x2 = np.ones(100)
# A constant array is a degenerate case and breaks gumbel_r.fit, so
# change one value in x2.
x2[0] = 0.996
A1, crit1, sig1 = stats.anderson(x1, 'gumbel_r')
A2, crit2, sig2 = stats.anderson(x2, 'gumbel_r')
assert_array_less(A1, crit1[-2:])
assert_(A2 > crit2[-1])
class TestAndersonKSamp:
def test_example1a(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
assert_almost_equal(Tk, 4.449, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm[0:5], 4)
assert_allclose(p, 0.0021, atol=0.00025)
def test_example1b(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass arrays
t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0])
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True)
assert_almost_equal(Tk, 4.480, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm[0:5], 4)
assert_allclose(p, 0.0020, atol=0.00025)
def test_example2a(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
# Pass lists instead of arrays
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=False)
assert_almost_equal(Tk, 3.288, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm[0:5], 4)
assert_allclose(p, 0.0041, atol=0.00025)
def test_example2b(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=True)
assert_almost_equal(Tk, 3.294, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm[0:5], 4)
assert_allclose(p, 0.0041, atol=0.00025)
def test_R_kSamples(self):
# test values generated with R package kSamples
# package version 1.2-6 (2017-06-14)
# r1 = 1:100
# continuous case (no ties) --> version 1
# res <- kSamples::ad.test(r1, r1 + 40.5)
# res$ad[1, "T.AD"] # 41.105
# res$ad[1, " asympt. P-value"] # 5.8399e-18
#
# discrete case (ties allowed) --> version 2 (here: midrank=True)
# res$ad[2, "T.AD"] # 41.235
#
# res <- kSamples::ad.test(r1, r1 + .5)
# res$ad[1, "T.AD"] # -1.2824
# res$ad[1, " asympt. P-value"] # 1
# res$ad[2, "T.AD"] # -1.2944
#
# res <- kSamples::ad.test(r1, r1 + 7.5)
# res$ad[1, "T.AD"] # 1.4923
# res$ad[1, " asympt. P-value"] # 0.077501
#
# res <- kSamples::ad.test(r1, r1 + 6)
# res$ad[2, "T.AD"] # 0.63892
# res$ad[2, " asympt. P-value"] # 0.17981
#
# res <- kSamples::ad.test(r1, r1 + 11.5)
# res$ad[1, "T.AD"] # 4.5042
# res$ad[1, " asympt. P-value"] # 0.00545
#
# res <- kSamples::ad.test(r1, r1 + 13.5)
# res$ad[1, "T.AD"] # 6.2982
# res$ad[1, " asympt. P-value"] # 0.00118
x1 = np.linspace(1, 100, 100)
# test case: different distributions; p-value floored at 0.001
# test case for issue #5493 / #8536
with suppress_warnings() as sup:
sup.filter(UserWarning, message='p-value floored')
s, _, p = stats.anderson_ksamp([x1, x1 + 40.5], midrank=False)
assert_almost_equal(s, 41.105, 3)
assert_equal(p, 0.001)
with suppress_warnings() as sup:
sup.filter(UserWarning, message='p-value floored')
s, _, p = stats.anderson_ksamp([x1, x1 + 40.5])
assert_almost_equal(s, 41.235, 3)
assert_equal(p, 0.001)
# test case: similar distributions --> p-value capped at 0.25
with suppress_warnings() as sup:
sup.filter(UserWarning, message='p-value capped')
s, _, p = stats.anderson_ksamp([x1, x1 + .5], midrank=False)
assert_almost_equal(s, -1.2824, 4)
assert_equal(p, 0.25)
with suppress_warnings() as sup:
sup.filter(UserWarning, message='p-value capped')
s, _, p = stats.anderson_ksamp([x1, x1 + .5])
assert_almost_equal(s, -1.2944, 4)
assert_equal(p, 0.25)
# test case: check interpolated p-value in [0.01, 0.25] (no ties)
s, _, p = stats.anderson_ksamp([x1, x1 + 7.5], midrank=False)
assert_almost_equal(s, 1.4923, 4)
assert_allclose(p, 0.0775, atol=0.005, rtol=0)
# test case: check interpolated p-value in [0.01, 0.25] (w/ ties)
s, _, p = stats.anderson_ksamp([x1, x1 + 6])
assert_almost_equal(s, 0.6389, 4)
assert_allclose(p, 0.1798, atol=0.005, rtol=0)
# test extended critical values for p=0.001 and p=0.005
s, _, p = stats.anderson_ksamp([x1, x1 + 11.5], midrank=False)
assert_almost_equal(s, 4.5042, 4)
assert_allclose(p, 0.00545, atol=0.0005, rtol=0)
s, _, p = stats.anderson_ksamp([x1, x1 + 13.5], midrank=False)
assert_almost_equal(s, 6.2982, 4)
assert_allclose(p, 0.00118, atol=0.0001, rtol=0)
def test_not_enough_samples(self):
assert_raises(ValueError, stats.anderson_ksamp, np.ones(5))
def test_no_distinct_observations(self):
assert_raises(ValueError, stats.anderson_ksamp,
(np.ones(5), np.ones(5)))
def test_empty_sample(self):
assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), []))
def test_result_attributes(self):
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
res = stats.anderson_ksamp((t1, t2), midrank=False)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
class TestAnsari:
def test_small(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with suppress_warnings() as sup:
sup.filter(UserWarning, "Ties preclude use of exact statistic.")
W, pval = stats.ansari(x, y)
assert_almost_equal(W, 23.5, 11)
assert_almost_equal(pval, 0.13499256881897437, 11)
def test_approx(self):
ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
101, 96, 97, 102, 107, 113, 116, 113, 110, 98))
parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104,
100, 96, 108, 103, 104, 114, 114, 113, 108,
106, 99))
with suppress_warnings() as sup:
sup.filter(UserWarning, "Ties preclude use of exact statistic.")
W, pval = stats.ansari(ramsay, parekh)
assert_almost_equal(W, 185.5, 11)
assert_almost_equal(pval, 0.18145819972867083, 11)
def test_exact(self):
W, pval = stats.ansari([1, 2, 3, 4], [15, 5, 20, 8, 10, 12])
assert_almost_equal(W, 10.0, 11)
assert_almost_equal(pval, 0.533333333333333333, 7)
def test_bad_arg(self):
assert_raises(ValueError, stats.ansari, [], [1])
assert_raises(ValueError, stats.ansari, [1], [])
def test_result_attributes(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with suppress_warnings() as sup:
sup.filter(UserWarning, "Ties preclude use of exact statistic.")
res = stats.ansari(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_bad_alternative(self):
# invalid value for alternative must raise a ValueError
x1 = [1, 2, 3, 4]
x2 = [5, 6, 7, 8]
match = "'alternative' must be 'two-sided'"
with assert_raises(ValueError, match=match):
stats.ansari(x1, x2, alternative='foo')
def test_alternative_exact(self):
x1 = [-5, 1, 5, 10, 15, 20, 25] # high scale, loc=10
x2 = [7.5, 8.5, 9.5, 10.5, 11.5, 12.5] # low scale, loc=10
# ratio of scales is greater than 1. So, the
# p-value must be high when `alternative='less'`
# and low when `alternative='greater'`.
statistic, pval = stats.ansari(x1, x2)
pval_l = stats.ansari(x1, x2, alternative='less').pvalue
pval_g = stats.ansari(x1, x2, alternative='greater').pvalue
assert pval_l > 0.95
assert pval_g < 0.05 # level of significance.
# also check that the p-values sum up to 1 plus the probability
# mass under the calculated statistic.
prob = _abw_state.pmf(statistic, len(x1), len(x2))
assert_allclose(pval_g + pval_l, 1 + prob, atol=1e-12)
# also check that one of the one-sided p-values equals half the
# two-sided p-value and the other one-sided p-value is its
# complement.
assert_allclose(pval_g, pval/2, atol=1e-12)
assert_allclose(pval_l, 1+prob-pval/2, atol=1e-12)
# sanity check. The result should flip if
# we exchange x and y.
pval_l_reverse = stats.ansari(x2, x1, alternative='less').pvalue
pval_g_reverse = stats.ansari(x2, x1, alternative='greater').pvalue
assert pval_l_reverse < 0.05
assert pval_g_reverse > 0.95
@pytest.mark.parametrize(
'x, y, alternative, expected',
# the tests are designed in such a way that the
# if-else branches in the exact mode of the
# Ansari test are covered.
[([1, 2, 3, 4], [5, 6, 7, 8], 'less', 0.6285714285714),
([1, 2, 3, 4], [5, 6, 7, 8], 'greater', 0.6285714285714),
([1, 2, 3], [4, 5, 6, 7, 8], 'less', 0.8928571428571),
([1, 2, 3], [4, 5, 6, 7, 8], 'greater', 0.2857142857143),
([1, 2, 3, 4, 5], [6, 7, 8], 'less', 0.2857142857143),
([1, 2, 3, 4, 5], [6, 7, 8], 'greater', 0.8928571428571)]
)
def test_alternative_exact_with_R(self, x, y, alternative, expected):
# testing with R on arbitrary data
# Sample R code used for the third test case above:
# ```R
# > options(digits=16)
# > x <- c(1,2,3)
# > y <- c(4,5,6,7,8)
# > ansari.test(x, y, alternative='less', exact=TRUE)
#
# Ansari-Bradley test
#
# data: x and y
# AB = 6, p-value = 0.8928571428571
# alternative hypothesis: true ratio of scales is less than 1
#
# ```
pval = stats.ansari(x, y, alternative=alternative).pvalue
assert_allclose(pval, expected, atol=1e-12)
def test_alternative_approx(self):
# intuitive tests for approximation
x1 = stats.norm.rvs(0, 5, size=100, random_state=123)
x2 = stats.norm.rvs(0, 2, size=100, random_state=123)
# for m > 55 or n > 55, the test should automatically
# switch to approximation.
pval_l = stats.ansari(x1, x2, alternative='less').pvalue
pval_g = stats.ansari(x1, x2, alternative='greater').pvalue
assert_allclose(pval_l, 1.0, atol=1e-12)
assert_allclose(pval_g, 0.0, atol=1e-12)
# also check that one of the one-sided p-values equals half the
# two-sided p-value and the other one-sided p-value is its
# complement.
x1 = stats.norm.rvs(0, 2, size=60, random_state=123)
x2 = stats.norm.rvs(0, 1.5, size=60, random_state=123)
pval = stats.ansari(x1, x2).pvalue
pval_l = stats.ansari(x1, x2, alternative='less').pvalue
pval_g = stats.ansari(x1, x2, alternative='greater').pvalue
assert_allclose(pval_g, pval/2, atol=1e-12)
assert_allclose(pval_l, 1-pval/2, atol=1e-12)
class TestBartlett:
def test_data(self):
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
T, pval = stats.bartlett(*args)
assert_almost_equal(T, 20.78587342806484, 7)
assert_almost_equal(pval, 0.0136358632781, 7)
def test_bad_arg(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.bartlett, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.bartlett(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_empty_arg(self):
args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, [])
assert_equal((np.nan, np.nan), stats.bartlett(*args))
# temporary fix for issue #9252: only accept 1d input
def test_1d_input(self):
x = np.array([[1, 2], [3, 4]])
assert_raises(ValueError, stats.bartlett, g1, x)
class TestLevene:
def test_data(self):
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
W, pval = stats.levene(*args)
assert_almost_equal(W, 1.7059176930008939, 7)
assert_almost_equal(pval, 0.0990829755522, 7)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
W1, pval1 = stats.levene(g1, g2, g3, center='mean')
W2, pval2 = stats.levene(g1, g2, g3, center='trimmed',
proportiontocut=0.0)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
np.random.seed(1234)
x2 = np.random.permutation(x)
# Use center='trimmed'
W0, pval0 = stats.levene(x, y, center='trimmed',
proportiontocut=0.125)
W1, pval1 = stats.levene(x2, y, center='trimmed',
proportiontocut=0.125)
# Trim the data here, and use center='mean'
W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(W0, W2)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_equal_mean_median(self):
x = np.linspace(-1, 1, 21)
np.random.seed(1234)
x2 = np.random.permutation(x)
y = x**3
W1, pval1 = stats.levene(x, y, center='mean')
W2, pval2 = stats.levene(x2, y, center='median')
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1, 1, 21)
assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1, 1, 21)
assert_raises(ValueError, stats.levene, x, x, center='trim')
def test_too_few_args(self):
assert_raises(ValueError, stats.levene, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.levene(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
# temporary fix for issue #9252: only accept 1d input
def test_1d_input(self):
x = np.array([[1, 2], [3, 4]])
assert_raises(ValueError, stats.levene, g1, x)
class TestBinomP:
"""Tests for stats.binom_test."""
binom_test_func = staticmethod(stats.binom_test)
def test_data(self):
pval = self.binom_test_func(100, 250)
assert_almost_equal(pval, 0.0018833009350757682, 11)
pval = self.binom_test_func(201, 405)
assert_almost_equal(pval, 0.92085205962670713, 11)
pval = self.binom_test_func([682, 243], p=3/4)
assert_almost_equal(pval, 0.38249155957481695, 11)
def test_bad_len_x(self):
# Length of x must be 1 or 2.
assert_raises(ValueError, self.binom_test_func, [1, 2, 3])
def test_bad_n(self):
# len(x) is 1, but n is invalid.
# Missing n
assert_raises(ValueError, self.binom_test_func, [100])
# n less than x[0]
assert_raises(ValueError, self.binom_test_func, [100], n=50)
def test_bad_p(self):
assert_raises(ValueError,
self.binom_test_func, [50, 50], p=2.0)
def test_alternatives(self):
res = self.binom_test_func(51, 235, p=1/6, alternative='less')
assert_almost_equal(res, 0.982022657605858)
res = self.binom_test_func(51, 235, p=1/6, alternative='greater')
assert_almost_equal(res, 0.02654424571169085)
res = self.binom_test_func(51, 235, p=1/6, alternative='two-sided')
assert_almost_equal(res, 0.0437479701823997)
class TestBinomTestP(TestBinomP):
"""
Tests for stats.binomtest as a replacement for stats.binom_test.
"""
@staticmethod
def binom_test_func(x, n=None, p=0.5, alternative='two-sided'):
# This processing of x and n is copied from binom_test.
x = np.atleast_1d(x).astype(np.int_)
if len(x) == 2:
n = x[1] + x[0]
x = x[0]
elif len(x) == 1:
x = x[0]
if n is None or n < x:
raise ValueError("n must be >= x")
n = np.int_(n)
else:
raise ValueError("Incorrect length for x.")
result = stats.binomtest(x, n, p=p, alternative=alternative)
return result.pvalue
class TestBinomTest:
"""Tests for stats.binomtest."""
# Expected results here are from the old binom_test function.
# The alternative is left unspecified, which defaults
# to two-sided.
@pytest.mark.xfail_on_32bit("The large inputs make these tests "
"sensitive to machine epsilon level")
def test_two_sided_pvalues1(self):
# These tests work on all OS's but fail on
# Linux_Python_37_32bit_full due to numerical issues caused
# by large inputs.
rtol = 5e-13 # aarch64 observed rtol: 3.5e-13
res = stats.binomtest(10079999, 21000000, 0.48)
assert_allclose(res.pvalue, 0.979042561004596, rtol=rtol)
res = stats.binomtest(10079990, 21000000, 0.48)
assert_allclose(res.pvalue, 0.9785298857599378, rtol=rtol)
res = stats.binomtest(10080009, 21000000, 0.48)
assert_allclose(res.pvalue, 0.9786038762958954, rtol=rtol)
res = stats.binomtest(10080017, 21000000, 0.48)
assert_allclose(res.pvalue, 0.9778567637538729, rtol=rtol)
@pytest.mark.xfail_on_32bit("The large inputs make these tests "
"sensitive to machine epsilon level")
def test_two_sided_pvalues2(self):
rtol = 1e-14 # no aarch64 failure with 1e-15, preemptive bump
res = stats.binomtest(9, n=21, p=0.48)
assert_allclose(res.pvalue, 0.6689672431938848, rtol=rtol)
res = stats.binomtest(4, 21, 0.48)
assert_allclose(res.pvalue, 0.008139563452105921, rtol=rtol)
res = stats.binomtest(11, 21, 0.48)
assert_allclose(res.pvalue, 0.8278629664608201, rtol=rtol)
res = stats.binomtest(7, 21, 0.48)
assert_allclose(res.pvalue, 0.19667729017182273, rtol=rtol)
res = stats.binomtest(3, 10, .5)
assert_allclose(res.pvalue, 0.3437499999999999, rtol=rtol)
res = stats.binomtest(2, 2, .4)
assert_allclose(res.pvalue, 0.16000000000000003, rtol=rtol)
res = stats.binomtest(2, 4, .3)
assert_allclose(res.pvalue, 0.5883999999999999, rtol=rtol)
@pytest.mark.xfail_on_32bit("The large inputs make these tests "
"sensitive to machine epsilon level")
def test_edge_cases(self):
rtol = 1e-14 # aarch64 observed rtol: 1.33e-15
res = stats.binomtest(484, 967, 0.5)
assert_allclose(res.pvalue, 0.999999999998212, rtol=rtol)
res = stats.binomtest(3, 47, 3/47)
assert_allclose(res.pvalue, 0.9999999999999998, rtol=rtol)
res = stats.binomtest(13, 46, 13/46)
assert_allclose(res.pvalue, 0.9999999999999987, rtol=rtol)
res = stats.binomtest(15, 44, 15/44)
assert_allclose(res.pvalue, 0.9999999999999989, rtol=rtol)
res = stats.binomtest(7, 13, 0.5)
assert_allclose(res.pvalue, 0.9999999999999999, rtol=rtol)
res = stats.binomtest(6, 11, 0.5)
assert_allclose(res.pvalue, 0.9999999999999997, rtol=rtol)
def test_binary_srch_for_binom_tst(self):
# Test that old behavior of binomtest is maintained
# by the new binary search method in cases where d
# exactly equals the input on one side.
n = 10
p = 0.5
k = 3
# First test for the case where k > mode of PMF
i = np.arange(np.ceil(p * n), n+1)
d = stats.binom.pmf(k, n, p)
# Old way of calculating y, probably consistent with R.
y1 = np.sum(stats.binom.pmf(i, n, p) <= d, axis=0)
# New way with binary search.
ix = _binary_search_for_binom_tst(lambda x1:
-stats.binom.pmf(x1, n, p),
-d, np.ceil(p * n), n)
y2 = n - ix + int(d == stats.binom.pmf(ix, n, p))
assert_allclose(y1, y2, rtol=1e-9)
# Now test for the other side.
k = 7
i = np.arange(np.floor(p * n) + 1)
d = stats.binom.pmf(k, n, p)
# Old way of calculating y.
y1 = np.sum(stats.binom.pmf(i, n, p) <= d, axis=0)
# New way with binary search.
ix = _binary_search_for_binom_tst(lambda x1:
stats.binom.pmf(x1, n, p),
d, 0, np.floor(p * n))
y2 = ix + 1
assert_allclose(y1, y2, rtol=1e-9)
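    # A minimal sketch of the binary-search idea exercised above (an
    # illustration only, not the private _binary_search_for_binom_tst helper;
    # the name and signature here are made up for this example): find the
    # largest integer i in [lo, hi] with func(i) <= target, where func is
    # assumed nondecreasing on that range.
    @staticmethod
    def _bisect_le_sketch(func, target, lo, hi):
        # Bail out early if even the smallest index overshoots the target.
        if func(lo) > target:
            return lo - 1
        while lo < hi:
            mid = (lo + hi + 1) // 2  # round up so `lo` always advances
            if func(mid) <= target:
                lo = mid
            else:
                hi = mid - 1
        return lo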
# Expected results here are from R 3.6.2 binom.test
@pytest.mark.parametrize('alternative, pval, ci_low, ci_high',
[('less', 0.1488311, 0.0, 0.2772002),
('greater', 0.9004696, 0.1366613, 1.0),
('two-sided', 0.2983721, 0.1266556, 0.2918427)])
def test_confidence_intervals1(self, alternative, pval, ci_low, ci_high):
res = stats.binomtest(20, n=100, p=0.25, alternative=alternative)
assert_allclose(res.pvalue, pval, rtol=1e-6)
assert_equal(res.proportion_estimate, 0.2)
ci = res.proportion_ci(confidence_level=0.95)
assert_allclose((ci.low, ci.high), (ci_low, ci_high), rtol=1e-6)
# Expected results here are from R 3.6.2 binom.test.
@pytest.mark.parametrize('alternative, pval, ci_low, ci_high',
[('less',
0.005656361, 0.0, 0.1872093),
('greater',
0.9987146, 0.008860761, 1.0),
('two-sided',
0.01191714, 0.006872485, 0.202706269)])
def test_confidence_intervals2(self, alternative, pval, ci_low, ci_high):
res = stats.binomtest(3, n=50, p=0.2, alternative=alternative)
assert_allclose(res.pvalue, pval, rtol=1e-6)
assert_equal(res.proportion_estimate, 0.06)
ci = res.proportion_ci(confidence_level=0.99)
assert_allclose((ci.low, ci.high), (ci_low, ci_high), rtol=1e-6)
# Expected results here are from R 3.6.2 binom.test.
@pytest.mark.parametrize('alternative, pval, ci_high',
[('less', 0.05631351, 0.2588656),
('greater', 1.0, 1.0),
('two-sided', 0.07604122, 0.3084971)])
def test_confidence_interval_exact_k0(self, alternative, pval, ci_high):
# Test with k=0, n = 10.
res = stats.binomtest(0, 10, p=0.25, alternative=alternative)
assert_allclose(res.pvalue, pval, rtol=1e-6)
ci = res.proportion_ci(confidence_level=0.95)
assert_equal(ci.low, 0.0)
assert_allclose(ci.high, ci_high, rtol=1e-6)
# Expected results here are from R 3.6.2 binom.test.
@pytest.mark.parametrize('alternative, pval, ci_low',
[('less', 1.0, 0.0),
('greater', 9.536743e-07, 0.7411344),
('two-sided', 9.536743e-07, 0.6915029)])
def test_confidence_interval_exact_k_is_n(self, alternative, pval, ci_low):
# Test with k = n = 10.
res = stats.binomtest(10, 10, p=0.25, alternative=alternative)
assert_allclose(res.pvalue, pval, rtol=1e-6)
ci = res.proportion_ci(confidence_level=0.95)
assert_equal(ci.high, 1.0)
assert_allclose(ci.low, ci_low, rtol=1e-6)
# Expected results are from the prop.test function in R 3.6.2.
@pytest.mark.parametrize(
'k, alternative, corr, conf, ci_low, ci_high',
[[3, 'two-sided', True, 0.95, 0.08094782, 0.64632928],
[3, 'two-sided', True, 0.99, 0.0586329, 0.7169416],
[3, 'two-sided', False, 0.95, 0.1077913, 0.6032219],
[3, 'two-sided', False, 0.99, 0.07956632, 0.6799753],
[3, 'less', True, 0.95, 0.0, 0.6043476],
[3, 'less', True, 0.99, 0.0, 0.6901811],
[3, 'less', False, 0.95, 0.0, 0.5583002],
[3, 'less', False, 0.99, 0.0, 0.6507187],
[3, 'greater', True, 0.95, 0.09644904, 1.0],
[3, 'greater', True, 0.99, 0.06659141, 1.0],
[3, 'greater', False, 0.95, 0.1268766, 1.0],
[3, 'greater', False, 0.99, 0.08974147, 1.0],
[0, 'two-sided', True, 0.95, 0.0, 0.3445372],
[0, 'two-sided', False, 0.95, 0.0, 0.2775328],
[0, 'less', True, 0.95, 0.0, 0.2847374],
[0, 'less', False, 0.95, 0.0, 0.212942],
[0, 'greater', True, 0.95, 0.0, 1.0],
[0, 'greater', False, 0.95, 0.0, 1.0],
[10, 'two-sided', True, 0.95, 0.6554628, 1.0],
[10, 'two-sided', False, 0.95, 0.7224672, 1.0],
[10, 'less', True, 0.95, 0.0, 1.0],
[10, 'less', False, 0.95, 0.0, 1.0],
[10, 'greater', True, 0.95, 0.7152626, 1.0],
[10, 'greater', False, 0.95, 0.787058, 1.0]]
)
def test_ci_wilson_method(self, k, alternative, corr, conf,
ci_low, ci_high):
res = stats.binomtest(k, n=10, p=0.1, alternative=alternative)
if corr:
method = 'wilsoncc'
else:
method = 'wilson'
ci = res.proportion_ci(confidence_level=conf, method=method)
assert_allclose((ci.low, ci.high), (ci_low, ci_high), rtol=1e-6)
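    # For reference, the 'wilson' method checked above is the Wilson score
    # interval: with phat = k/n and z the normal quantile for the requested
    # confidence level,
    #     (phat + z**2/(2*n) +/- z*sqrt(phat*(1 - phat)/n + z**2/(4*n**2)))
    #         / (1 + z**2/n)
    # 'wilsoncc' is the same interval with a continuity correction.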
def test_estimate_equals_hypothesized_prop(self):
# Test the special case where the estimated proportion equals
# the hypothesized proportion. When alternative is 'two-sided',
# the p-value is 1.
res = stats.binomtest(4, 16, 0.25)
assert_equal(res.proportion_estimate, 0.25)
assert_equal(res.pvalue, 1.0)
@pytest.mark.parametrize('k, n', [(0, 0), (-1, 2)])
def test_invalid_k_n(self, k, n):
with pytest.raises(ValueError,
match="must be an integer not less than"):
stats.binomtest(k, n)
def test_invalid_k_too_big(self):
with pytest.raises(ValueError,
match="k must not be greater than n"):
stats.binomtest(11, 10, 0.25)
def test_invalid_confidence_level(self):
res = stats.binomtest(3, n=10, p=0.1)
with pytest.raises(ValueError, match="must be in the interval"):
res.proportion_ci(confidence_level=-1)
def test_invalid_ci_method(self):
res = stats.binomtest(3, n=10, p=0.1)
with pytest.raises(ValueError, match="method must be"):
res.proportion_ci(method="plate of shrimp")
class TestFligner:
def test_data(self):
# numbers from R: fligner.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.fligner(x1, x1**2),
(3.2282229927203536, 0.072379187848207877),
11)
def test_trimmed1(self):
# Perturb input to break ties in the transformed data
# See https://github.com/scipy/scipy/pull/8042 for more details
rs = np.random.RandomState(123)
_perturb = lambda g: (np.asarray(g) + 1e-10*rs.randn(len(g))).tolist()
g1_ = _perturb(g1)
g2_ = _perturb(g2)
g3_ = _perturb(g3)
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
Xsq1, pval1 = stats.fligner(g1_, g2_, g3_, center='mean')
Xsq2, pval2 = stats.fligner(g1_, g2_, g3_, center='trimmed',
proportiontocut=0.0)
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
# Use center='trimmed'
Xsq1, pval1 = stats.fligner(x, y, center='trimmed',
proportiontocut=0.125)
# Trim the data here, and use center='mean'
Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
# The following test looks reasonable at first, but fligner() uses the
# function stats.rankdata(), and in one of the cases in this test,
# there are ties, while in the other (because of normal rounding
# errors) there are not. This difference leads to differences in the
# third significant digit of W.
#
#def test_equal_mean_median(self):
# x = np.linspace(-1,1,21)
# y = x**3
# W1, pval1 = stats.fligner(x, y, center='mean')
# W2, pval2 = stats.fligner(x, y, center='median')
# assert_almost_equal(W1, W2)
# assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1, 1, 21)
assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1, 1, 21)
assert_raises(ValueError, stats.fligner, x, x, center='trim')
def test_bad_num_args(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.fligner, [1])
def test_empty_arg(self):
x = np.arange(5)
assert_equal((np.nan, np.nan), stats.fligner(x, x**2, []))
class TestMood:
def test_mood(self):
# numbers from R: mood.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.mood(x1, x1**2),
(-1.3830857299399906, 0.16663858066771478),
11)
def test_mood_order_of_args(self):
# z should change sign when the order of arguments changes, pvalue
# should not change
np.random.seed(1234)
x1 = np.random.randn(10, 1)
x2 = np.random.randn(15, 1)
z1, p1 = stats.mood(x1, x2)
z2, p2 = stats.mood(x2, x1)
assert_array_almost_equal([z1, p1], [-z2, p2])
def test_mood_with_axis_none(self):
# Test with axis = None, compare with results from R
x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047,
1.59528080213779, 0.329507771815361, -0.820468384118015,
0.487429052428485, 0.738324705129217, 0.575781351653492,
-0.305388387156356, 1.51178116845085, 0.389843236411431,
-0.621240580541804, -2.2146998871775, 1.12493091814311,
-0.0449336090152309, -0.0161902630989461, 0.943836210685299,
0.821221195098089, 0.593901321217509]
x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882,
-1.13037567424629, -0.0802517565509893, 0.132420284381094,
0.707954729271733, -0.23969802417184, 1.98447393665293,
-0.138787012119665, 0.417650750792556, 0.981752777463662,
-0.392695355503813, -1.03966897694891, 1.78222896030858,
-2.31106908460517, 0.878604580921265, 0.035806718015226,
1.01282869212708, 0.432265154539617, 2.09081920524915,
-1.19992581964387, 1.58963820029007, 1.95465164222325,
0.00493777682814261, -2.45170638784613, 0.477237302613617,
-0.596558168631403, 0.792203270299649, 0.289636710177348]
x1 = np.array(x1)
x2 = np.array(x2)
x1.shape = (10, 2)
x2.shape = (15, 2)
assert_array_almost_equal(stats.mood(x1, x2, axis=None),
[-1.31716607555, 0.18778296257])
def test_mood_2d(self):
# Test if the results of mood test in 2-D case are consistent with the
# R result for the same inputs. Numbers from R mood.test().
ny = 5
np.random.seed(1234)
x1 = np.random.randn(10, ny)
x2 = np.random.randn(15, ny)
z_vectest, pval_vectest = stats.mood(x1, x2)
for j in range(ny):
assert_array_almost_equal([z_vectest[j], pval_vectest[j]],
stats.mood(x1[:, j], x2[:, j]))
# inverse order of dimensions
x1 = x1.transpose()
x2 = x2.transpose()
z_vectest, pval_vectest = stats.mood(x1, x2, axis=1)
for i in range(ny):
# check that axis handling is self-consistent
assert_array_almost_equal([z_vectest[i], pval_vectest[i]],
stats.mood(x1[i, :], x2[i, :]))
def test_mood_3d(self):
shape = (10, 5, 6)
np.random.seed(1234)
x1 = np.random.randn(*shape)
x2 = np.random.randn(*shape)
for axis in range(3):
z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis)
# Tests that result for 3-D arrays is equal to that for the
# same calculation on a set of 1-D arrays taken from the
# 3-D array
axes_idx = ([1, 2], [0, 2], [0, 1]) # the two axes != axis
for i in range(shape[axes_idx[axis][0]]):
for j in range(shape[axes_idx[axis][1]]):
if axis == 0:
slice1 = x1[:, i, j]
slice2 = x2[:, i, j]
elif axis == 1:
slice1 = x1[i, :, j]
slice2 = x2[i, :, j]
else:
slice1 = x1[i, j, :]
slice2 = x2[i, j, :]
assert_array_almost_equal([z_vectest[i, j],
pval_vectest[i, j]],
stats.mood(slice1, slice2))
def test_mood_bad_arg(self):
# Raise ValueError when the sum of the lengths of the args is
# less than 3
assert_raises(ValueError, stats.mood, [1], [])
def test_mood_alternative(self):
np.random.seed(0)
x = stats.norm.rvs(scale=0.75, size=100)
y = stats.norm.rvs(scale=1.25, size=100)
stat1, p1 = stats.mood(x, y, alternative='two-sided')
stat2, p2 = stats.mood(x, y, alternative='less')
stat3, p3 = stats.mood(x, y, alternative='greater')
assert stat1 == stat2 == stat3
assert_allclose(p1, 0, atol=1e-7)
assert_allclose(p2, p1/2)
assert_allclose(p3, 1 - p1/2)
with pytest.raises(ValueError, match="alternative must be..."):
stats.mood(x, y, alternative='ekki-ekki')
@pytest.mark.xfail(reason="SciPy needs tie correction like R (gh-13730)")
@pytest.mark.parametrize("alternative, expected",
[('two-sided', (1.037127561496, 0.299676411857)),
('less', (1.0371275614961, 0.8501617940715)),
('greater', (1.037127561496, 0.1498382059285))])
def test_mood_alternative_against_R(self, alternative, expected):
# Test against R mood.test: https://rdrr.io/r/stats/mood.test.html
# options(digits=16)
# x <- c(111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
# 101, 96, 97, 102, 107, 113, 116, 113, 110, 98)
# y <- c(107, 108, 106, 98, 105, 103, 110, 105, 104,
# 100, 96, 108, 103, 104, 114, 114, 113, 108, 106, 99)
# mood.test(x, y, alternative='less')
x = [111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
101, 96, 97, 102, 107, 113, 116, 113, 110, 98]
y = [107, 108, 106, 98, 105, 103, 110, 105, 104, 100,
96, 108, 103, 104, 114, 114, 113, 108, 106, 99]
res = stats.mood(x, y, alternative=alternative)
assert_allclose(res, expected)
class TestProbplot:
def test_basic(self):
x = stats.norm.rvs(size=20, random_state=12345)
osm, osr = stats.probplot(x, fit=False)
osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575,
-0.73908135, -0.5857176, -0.44506467, -0.31273668,
-0.18568928, -0.06158146, 0.06158146, 0.18568928,
0.31273668, 0.44506467, 0.5857176, 0.73908135,
0.91222575, 1.11829229, 1.38768012, 1.8241636]
assert_allclose(osr, np.sort(x))
assert_allclose(osm, osm_expected)
res, res_fit = stats.probplot(x, fit=True)
res_fit_expected = [1.05361841, 0.31297795, 0.98741609]
assert_allclose(res_fit, res_fit_expected)
def test_sparams_keyword(self):
x = stats.norm.rvs(size=100, random_state=123456)
# Check that None, () and 0 (loc=0, for normal distribution) all work
# and give the same results
osm1, osr1 = stats.probplot(x, sparams=None, fit=False)
osm2, osr2 = stats.probplot(x, sparams=0, fit=False)
osm3, osr3 = stats.probplot(x, sparams=(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osm1, osm3)
assert_allclose(osr1, osr2)
assert_allclose(osr1, osr3)
# Check giving (loc, scale) params for normal distribution
osm, osr = stats.probplot(x, sparams=(0, 1), fit=False)
def test_dist_keyword(self):
x = stats.norm.rvs(size=20, random_state=12345)
osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,))
osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,))
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name')
assert_raises(AttributeError, stats.probplot, x, dist=[])
class custom_dist:
"""Some class that looks just enough like a distribution."""
def ppf(self, q):
return stats.norm.ppf(q, loc=2)
osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False)
osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_plot_kwarg(self):
fig = plt.figure()
fig.add_subplot(111)
x = stats.t.rvs(3, size=100, random_state=7654321)
res1, fitres1 = stats.probplot(x, plot=plt)
plt.close()
res2, fitres2 = stats.probplot(x, plot=None)
res3 = stats.probplot(x, fit=False, plot=plt)
plt.close()
res4 = stats.probplot(x, fit=False, plot=None)
# Check that results are consistent between combinations of `fit` and
# `plot` keywords.
assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2)
assert_allclose(res1, res2)
assert_allclose(res1, res3)
assert_allclose(res1, res4)
assert_allclose(fitres1, fitres2)
# Check that a Matplotlib Axes object is accepted
fig = plt.figure()
ax = fig.add_subplot(111)
stats.probplot(x, fit=False, plot=ax)
plt.close()
def test_probplot_bad_args(self):
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp")
def test_empty(self):
assert_equal(stats.probplot([], fit=False),
(np.array([]), np.array([])))
assert_equal(stats.probplot([], fit=True),
((np.array([]), np.array([])),
(np.nan, np.nan, 0.0)))
def test_array_of_size_one(self):
with np.errstate(invalid='ignore'):
assert_equal(stats.probplot([1], fit=True),
((np.array([0.]), np.array([1])),
(np.nan, np.nan, 0.0)))
class TestWilcoxon:
def test_wilcoxon_bad_arg(self):
# Raise ValueError when two args of different lengths are given or
# zero_method is unknown.
assert_raises(ValueError, stats.wilcoxon, [1], [1, 2])
assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2], "dummy")
assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2],
alternative="dummy")
assert_raises(ValueError, stats.wilcoxon, [1]*10, mode="xyz")
def test_zero_diff(self):
x = np.arange(20)
# pratt and wilcox do not work if x - y == 0
assert_raises(ValueError, stats.wilcoxon, x, x, "wilcox",
mode="approx")
assert_raises(ValueError, stats.wilcoxon, x, x, "pratt",
mode="approx")
# ranksum is n*(n+1)/2, split in half if zero_method == "zsplit"
assert_equal(stats.wilcoxon(x, x, "zsplit", mode="approx"),
(20*21/4, 1.0))
def test_pratt(self):
# regression test for gh-6805: p-value matches value from R package
# coin (wilcoxsign_test) reported in the issue
x = [1, 2, 3, 4]
y = [1, 2, 3, 5]
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Sample size too small")
res = stats.wilcoxon(x, y, zero_method="pratt", mode="approx")
assert_allclose(res, (0.0, 0.31731050786291415))
def test_wilcoxon_arg_type(self):
# Should be able to accept lists as arguments.
# Addresses issue 6070.
arr = [1, 2, 3, 0, -1, 3, 1, 2, 1, 1, 2]
_ = stats.wilcoxon(arr, zero_method="pratt", mode="approx")
_ = stats.wilcoxon(arr, zero_method="zsplit", mode="approx")
_ = stats.wilcoxon(arr, zero_method="wilcox", mode="approx")
def test_accuracy_wilcoxon(self):
freq = [1, 4, 16, 15, 8, 4, 5, 1, 2]
nums = range(-4, 5)
x = np.concatenate([[u] * v for u, v in zip(nums, freq)])
y = np.zeros(x.size)
T, p = stats.wilcoxon(x, y, "pratt", mode="approx")
assert_allclose(T, 423)
assert_allclose(p, 0.0031724568006762576)
T, p = stats.wilcoxon(x, y, "zsplit", mode="approx")
assert_allclose(T, 441)
assert_allclose(p, 0.0032145343172473055)
T, p = stats.wilcoxon(x, y, "wilcox", mode="approx")
assert_allclose(T, 327)
assert_allclose(p, 0.00641346115861)
# Test the 'correction' option, using values computed in R with:
# > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE})
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
T, p = stats.wilcoxon(x, y, correction=False, mode="approx")
assert_equal(T, 34)
assert_allclose(p, 0.6948866, rtol=1e-6)
T, p = stats.wilcoxon(x, y, correction=True, mode="approx")
assert_equal(T, 34)
assert_allclose(p, 0.7240817, rtol=1e-6)
def test_wilcoxon_result_attributes(self):
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
res = stats.wilcoxon(x, y, correction=False, mode="approx")
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_wilcoxon_tie(self):
# Regression test for gh-2391.
# Corresponding R code is:
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE)
# > result$p.value
# [1] 0.001565402
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE)
# > result$p.value
# [1] 0.001904195
stat, p = stats.wilcoxon([0.1] * 10, mode="approx")
expected_p = 0.001565402
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
stat, p = stats.wilcoxon([0.1] * 10, correction=True, mode="approx")
expected_p = 0.001904195
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
def test_onesided(self):
# tested against "R version 3.4.1 (2017-06-30)"
# x <- c(125, 115, 130, 140, 140, 115, 140, 125, 140, 135)
# y <- c(110, 122, 125, 120, 140, 124, 123, 137, 135, 145)
# cfg <- list(x = x, y = y, paired = TRUE, exact = FALSE)
# do.call(wilcox.test, c(cfg, list(alternative = "less", correct = FALSE)))
# do.call(wilcox.test, c(cfg, list(alternative = "less", correct = TRUE)))
# do.call(wilcox.test, c(cfg, list(alternative = "greater", correct = FALSE)))
# do.call(wilcox.test, c(cfg, list(alternative = "greater", correct = TRUE)))
x = [125, 115, 130, 140, 140, 115, 140, 125, 140, 135]
y = [110, 122, 125, 120, 140, 124, 123, 137, 135, 145]
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Sample size too small")
w, p = stats.wilcoxon(x, y, alternative="less", mode="approx")
assert_equal(w, 27)
assert_almost_equal(p, 0.7031847, decimal=6)
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Sample size too small")
w, p = stats.wilcoxon(x, y, alternative="less", correction=True,
mode="approx")
assert_equal(w, 27)
assert_almost_equal(p, 0.7233656, decimal=6)
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Sample size too small")
w, p = stats.wilcoxon(x, y, alternative="greater", mode="approx")
assert_equal(w, 27)
assert_almost_equal(p, 0.2968153, decimal=6)
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Sample size too small")
w, p = stats.wilcoxon(x, y, alternative="greater", correction=True,
mode="approx")
assert_equal(w, 27)
assert_almost_equal(p, 0.3176447, decimal=6)
def test_exact_basic(self):
for n in range(1, 26):
cnt = _get_wilcoxon_distr(n)
assert_equal(n*(n+1)/2 + 1, len(cnt))
assert_equal(sum(cnt), 2**n)
def test_exact_pval(self):
# expected values computed with "R version 3.4.1 (2017-06-30)"
x = np.array([1.81, 0.82, 1.56, -0.48, 0.81, 1.28, -1.04, 0.23,
-0.75, 0.14])
y = np.array([0.71, 0.65, -0.2, 0.85, -1.1, -0.45, -0.84, -0.24,
-0.68, -0.76])
_, p = stats.wilcoxon(x, y, alternative="two-sided", mode="exact")
assert_almost_equal(p, 0.1054688, decimal=6)
_, p = stats.wilcoxon(x, y, alternative="less", mode="exact")
assert_almost_equal(p, 0.9580078, decimal=6)
_, p = stats.wilcoxon(x, y, alternative="greater", mode="exact")
assert_almost_equal(p, 0.05273438, decimal=6)
x = np.arange(0, 20) + 0.5
y = np.arange(20, 0, -1)
_, p = stats.wilcoxon(x, y, alternative="two-sided", mode="exact")
assert_almost_equal(p, 0.8694878, decimal=6)
_, p = stats.wilcoxon(x, y, alternative="less", mode="exact")
assert_almost_equal(p, 0.4347439, decimal=6)
_, p = stats.wilcoxon(x, y, alternative="greater", mode="exact")
assert_almost_equal(p, 0.5795889, decimal=6)
d = np.arange(26) + 1
assert_raises(ValueError, stats.wilcoxon, d, mode="exact")
# These inputs were chosen to give a W statistic that is either the
# center of the distribution (when the length of the support is odd), or
# the value to the left of the center (when the length of the support is
# even). Also, the numbers are chosen so that the W statistic is the
# sum of the positive values.
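# Worked example for the first case below: for x = [-1, -2, 3] the ranks
# of |x| are 1, 2, 3, so r_plus = 3 and r_minus = 1 + 2 = 3.  The support
# of r_plus is {0, ..., 6} (length 7, odd) with center 3, so the reported
# statistic equals the sum of the positive values (3) and the two-sided
# exact p-value is 1.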
@pytest.mark.parametrize('x', [[-1, -2, 3],
[-1, 2, -3, -4, 5],
[-1, -2, 3, -4, -5, -6, 7, 8]])
def test_exact_p_1(self, x):
w, p = stats.wilcoxon(x)
x = np.array(x)
wtrue = x[x > 0].sum()
assert_equal(w, wtrue)
assert_equal(p, 1)
def test_auto(self):
# auto defaults to exact if there are no ties and n <= 25
x = np.arange(0, 25) + 0.5
y = np.arange(25, 0, -1)
assert_equal(stats.wilcoxon(x, y),
stats.wilcoxon(x, y, mode="exact"))
# if there are ties (i.e. zeros in d = x-y), then switch to approx
d = np.arange(0, 13)
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Exact p-value calculation")
w, p = stats.wilcoxon(d)
assert_equal(stats.wilcoxon(d, mode="approx"), (w, p))
# use approximation for samples > 25
d = np.arange(1, 27)
assert_equal(stats.wilcoxon(d), stats.wilcoxon(d, mode="approx"))
class TestKstat:
def test_moments_normal_distribution(self):
np.random.seed(32149)
data = np.random.randn(12345)
moments = [stats.kstat(data, n) for n in [1, 2, 3, 4]]
expected = [0.011315, 1.017931, 0.05811052, 0.0754134]
assert_allclose(moments, expected, rtol=1e-4)
# test equivalence with `stats.moment`
m1 = stats.moment(data, moment=1)
m2 = stats.moment(data, moment=2)
m3 = stats.moment(data, moment=3)
assert_allclose((m1, m2, m3), expected[:-1], atol=0.02, rtol=1e-2)
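# (k-statistics are unbiased estimators of cumulants: k1 is the sample
# mean, k2 the unbiased sample variance and k3 an unbiased estimator of
# the third cumulant, which equals the third central moment; for this
# large, nearly centered normal sample they are therefore close to the
# corresponding central moments.)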
def test_empty_input(self):
assert_raises(ValueError, stats.kstat, [])
def test_nan_input(self):
data = np.arange(10.)
data[6] = np.nan
assert_equal(stats.kstat(data), np.nan)
def test_kstat_bad_arg(self):
# Raise ValueError if n > 4 or n < 1.
data = np.arange(10)
for n in [0, 4.001]:
assert_raises(ValueError, stats.kstat, data, n=n)
class TestKstatVar:
def test_empty_input(self):
assert_raises(ValueError, stats.kstatvar, [])
def test_nan_input(self):
data = np.arange(10.)
data[6] = np.nan
assert_equal(stats.kstat(data), np.nan)
def test_bad_arg(self):
# Raise ValueError if n is not 1 or 2.
data = [1]
n = 10
assert_raises(ValueError, stats.kstatvar, data, n=n)
class TestPpccPlot:
def setup_method(self):
self.x = stats.loggamma.rvs(5, size=500, random_state=7654321) + 5
def test_basic(self):
N = 5
svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N)
ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182,
0.93519298]
assert_allclose(svals, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
def test_dist(self):
# Test that we can specify distributions both by name and as objects.
svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda')
svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10,
dist=stats.tukeylambda)
assert_allclose(svals1, svals2, rtol=1e-20)
assert_allclose(ppcc1, ppcc2, rtol=1e-20)
# Test that 'tukeylambda' is the default dist
svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10)
assert_allclose(svals1, svals3, rtol=1e-20)
assert_allclose(ppcc1, ppcc3, rtol=1e-20)
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
ax = fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=plt)
fig.delaxes(ax)
# Check that a Matplotlib Axes object is accepted
ax = fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `b` has to be larger than `a`
assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0)
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1,
dist="plate_of_shrimp")
def test_empty(self):
# For consistency with probplot's return value for an empty array,
# ppcc contains all zeros and svals is the same as for a normal array
# input.
svals, ppcc = stats.ppcc_plot([], 0, 1)
assert_allclose(svals, np.linspace(0, 1, num=80))
assert_allclose(ppcc, np.zeros(80, dtype=float))
class TestPpccMax:
def test_ppcc_max_bad_arg(self):
# Raise ValueError when given an invalid distribution.
data = [1]
assert_raises(ValueError, stats.ppcc_max, data, dist="plate_of_shrimp")
def test_ppcc_max_basic(self):
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
random_state=1234567) + 1e4
assert_almost_equal(stats.ppcc_max(x), -0.71215366521264145, decimal=7)
def test_dist(self):
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
random_state=1234567) + 1e4
# Test that we can specify distributions both by name and as objects.
max1 = stats.ppcc_max(x, dist='tukeylambda')
max2 = stats.ppcc_max(x, dist=stats.tukeylambda)
assert_almost_equal(max1, -0.71215366521264145, decimal=5)
assert_almost_equal(max2, -0.71215366521264145, decimal=5)
# Test that 'tukeylambda' is the default dist
max3 = stats.ppcc_max(x)
assert_almost_equal(max3, -0.71215366521264145, decimal=5)
def test_brack(self):
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
random_state=1234567) + 1e4
assert_raises(ValueError, stats.ppcc_max, x, brack=(0.0, 1.0, 0.5))
assert_almost_equal(stats.ppcc_max(x, brack=(0, 1)),
-0.71215366521264145, decimal=7)
assert_almost_equal(stats.ppcc_max(x, brack=(-2, 2)),
-0.71215366521264145, decimal=7)
class TestBoxcox_llf:
def test_basic(self):
x = stats.norm.rvs(size=10000, loc=10, random_state=54321)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf_expected = -x.size / 2. * np.log(np.sum(x.std()**2))
assert_allclose(llf, llf_expected)
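# The expected value follows from the Box-Cox log-likelihood
#     llf = (lmbda - 1) * sum(log(x)) - N/2 * log(var(boxcox(x, lmbda)));
# at lmbda = 1 the first term vanishes and y = x - 1 is a pure shift, so
# var(y) equals the (biased) variance of x used above.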
def test_array_like(self):
x = stats.norm.rvs(size=100, loc=10, random_state=54321)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, list(x))
assert_allclose(llf, llf2, rtol=1e-12)
def test_2d_input(self):
# Note: boxcox_llf() was already working with 2-D input (sort of), so
# keep it like that. boxcox() doesn't work with 2-D input though, due
# to brent() returning a scalar.
x = stats.norm.rvs(size=100, loc=10, random_state=54321)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12)
def test_empty(self):
assert_(np.isnan(stats.boxcox_llf(1, [])))
def test_gh_6873(self):
# Regression test for gh-6873.
# This example was taken from gh-7534, a duplicate of gh-6873.
data = [198.0, 233.0, 233.0, 392.0]
llf = stats.boxcox_llf(-8, data)
# The expected value was computed with mpmath.
assert_allclose(llf, -17.93934208579061)
# This is the data from github user Qukaiyi, given as an example
# of a data set that caused boxcox to fail.
_boxcox_data = [
15957, 112079, 1039553, 711775, 173111, 307382, 183155, 53366, 760875,
207500, 160045, 473714, 40194, 440319, 133261, 265444, 155590, 36660,
904939, 55108, 138391, 339146, 458053, 63324, 1377727, 1342632, 41575,
68685, 172755, 63323, 368161, 199695, 538214, 167760, 388610, 398855,
1001873, 364591, 1320518, 194060, 194324, 2318551, 196114, 64225, 272000,
198668, 123585, 86420, 1925556, 695798, 88664, 46199, 759135, 28051,
345094, 1977752, 51778, 82746, 638126, 2560910, 45830, 140576, 1603787,
57371, 548730, 5343629, 2298913, 998813, 2156812, 423966, 68350, 145237,
131935, 1600305, 342359, 111398, 1409144, 281007, 60314, 242004, 113418,
246211, 61940, 95858, 957805, 40909, 307955, 174159, 124278, 241193,
872614, 304180, 146719, 64361, 87478, 509360, 167169, 933479, 620561,
483333, 97416, 143518, 286905, 597837, 2556043, 89065, 69944, 196858,
88883, 49379, 916265, 1527392, 626954, 54415, 89013, 2883386, 106096,
402697, 45578, 349852, 140379, 34648, 757343, 1305442, 2054757, 121232,
606048, 101492, 51426, 1820833, 83412, 136349, 1379924, 505977, 1303486,
95853, 146451, 285422, 2205423, 259020, 45864, 684547, 182014, 784334,
174793, 563068, 170745, 1195531, 63337, 71833, 199978, 2330904, 227335,
898280, 75294, 2011361, 116771, 157489, 807147, 1321443, 1148635, 2456524,
81839, 1228251, 97488, 1051892, 75397, 3009923, 2732230, 90923, 39735,
132433, 225033, 337555, 1204092, 686588, 1062402, 40362, 1361829, 1497217,
150074, 551459, 2019128, 39581, 45349, 1117187, 87845, 1877288, 164448,
10338362, 24942, 64737, 769946, 2469124, 2366997, 259124, 2667585, 29175,
56250, 74450, 96697, 5920978, 838375, 225914, 119494, 206004, 430907,
244083, 219495, 322239, 407426, 618748, 2087536, 2242124, 4736149, 124624,
406305, 240921, 2675273, 4425340, 821457, 578467, 28040, 348943, 48795,
145531, 52110, 1645730, 1768364, 348363, 85042, 2673847, 81935, 169075,
367733, 135474, 383327, 1207018, 93481, 5934183, 352190, 636533, 145870,
55659, 146215, 73191, 248681, 376907, 1606620, 169381, 81164, 246390,
236093, 885778, 335969, 49266, 381430, 307437, 350077, 34346, 49340,
84715, 527120, 40163, 46898, 4609439, 617038, 2239574, 159905, 118337,
120357, 430778, 3799158, 3516745, 54198, 2970796, 729239, 97848, 6317375,
887345, 58198, 88111, 867595, 210136, 1572103, 1420760, 574046, 845988,
509743, 397927, 1119016, 189955, 3883644, 291051, 126467, 1239907, 2556229,
411058, 657444, 2025234, 1211368, 93151, 577594, 4842264, 1531713, 305084,
479251, 20591, 1466166, 137417, 897756, 594767, 3606337, 32844, 82426,
1294831, 57174, 290167, 322066, 813146, 5671804, 4425684, 895607, 450598,
1048958, 232844, 56871, 46113, 70366, 701618, 97739, 157113, 865047,
194810, 1501615, 1765727, 38125, 2733376, 40642, 437590, 127337, 106310,
4167579, 665303, 809250, 1210317, 45750, 1853687, 348954, 156786, 90793,
1885504, 281501, 3902273, 359546, 797540, 623508, 3672775, 55330, 648221,
266831, 90030, 7118372, 735521, 1009925, 283901, 806005, 2434897, 94321,
309571, 4213597, 2213280, 120339, 64403, 8155209, 1686948, 4327743,
1868312, 135670, 3189615, 1569446, 706058, 58056, 2438625, 520619, 105201,
141961, 179990, 1351440, 3148662, 2804457, 2760144, 70775, 33807, 1926518,
2362142, 186761, 240941, 97860, 1040429, 1431035, 78892, 484039, 57845,
724126, 3166209, 175913, 159211, 1182095, 86734, 1921472, 513546, 326016,
1891609
]
class TestBoxcox:
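# Reference definition used in the fixed-lmbda checks below:
#     boxcox(x, lmbda) = (x**lmbda - 1) / lmbda    if lmbda != 0
#                      = log(x)                    if lmbda == 0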
def test_fixed_lmbda(self):
x = stats.loggamma.rvs(5, size=50, random_state=12345) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)
xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))
# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x))
def test_lmbda_None(self):
# Start from normal rv's, do inverse transform to check that the
# optimization function gets close to the right answer.  Applying the
# Box-Cox transform with exponent -1/lmbda to x_inv yields a linear
# function of the normal sample x, so maxlog should be close to -1/lmbda.
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000, random_state=1245)
x_inv = (x * lmbda + 1)**(-lmbda)
xt, maxlog = stats.boxcox(x_inv)
assert_almost_equal(maxlog, -1 / lmbda, decimal=2)
def test_alpha(self):
rng = np.random.RandomState(1234)
x = stats.loggamma.rvs(5, size=50, random_state=rng) + 5
# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])
# Try some extreme values, check that we don't hit the N=500 limit
x = stats.loggamma.rvs(7, size=500, random_state=rng) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292])
def test_boxcox_bad_arg(self):
# Raise ValueError if any data value is negative.
x = np.array([-1, 2])
assert_raises(ValueError, stats.boxcox, x)
# Raise ValueError if data is constant.
assert_raises(ValueError, stats.boxcox, np.array([1]))
# Raise ValueError if data is not 1-dimensional.
assert_raises(ValueError, stats.boxcox, np.array([[1], [2]]))
def test_empty(self):
assert_(stats.boxcox([]).shape == (0,))
def test_gh_6873(self):
# Regression test for gh-6873.
y, lam = stats.boxcox(_boxcox_data)
# The expected value of lam was computed with the function
# powerTransform in the R library 'car'. I trust that value
# to only about five significant digits.
assert_allclose(lam, -0.051654, rtol=1e-5)
@pytest.mark.parametrize("bounds", [(-1, 1), (1.1, 2), (-2, -1.1)])
def test_bounded_optimizer_within_bounds(self, bounds):
# Define custom optimizer with bounds.
def optimizer(fun):
return optimize.minimize_scalar(fun, bounds=bounds,
method="bounded")
_, lmbda = stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer)
assert bounds[0] < lmbda < bounds[1]
def test_bounded_optimizer_against_unbounded_optimizer(self):
# Test whether setting bounds on the optimizer excludes the solution
# found by the unbounded optimizer.
# Get unbounded solution.
_, lmbda = stats.boxcox(_boxcox_data, lmbda=None)
# Set tolerance and bounds around solution.
bounds = (lmbda + 0.1, lmbda + 1)
options = {'xatol': 1e-12}
def optimizer(fun):
return optimize.minimize_scalar(fun, bounds=bounds,
method="bounded", options=options)
# Check bounded solution. Lower bound should be active.
_, lmbda_bounded = stats.boxcox(_boxcox_data, lmbda=None,
optimizer=optimizer)
assert lmbda_bounded != lmbda
assert_allclose(lmbda_bounded, bounds[0])
@pytest.mark.parametrize("optimizer", ["str", (1, 2), 0.1])
def test_bad_optimizer_type_raises_error(self, optimizer):
# Check that an error is raised if a string, tuple or float is passed
with pytest.raises(ValueError, match="`optimizer` must be a callable"):
stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer)
def test_bad_optimizer_value_raises_error(self):
# Check that an error is raised if the `optimizer` function does not
# return an `OptimizeResult` object
# Define test function that always returns 1
def optimizer(fun):
return 1
message = "`optimizer` must return an object containing the optimal..."
with pytest.raises(ValueError, match=message):
stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer)
class TestBoxcoxNormmax:
def setup_method(self):
self.x = stats.loggamma.rvs(5, size=50, random_state=12345) + 5
def test_pearsonr(self):
maxlog = stats.boxcox_normmax(self.x)
assert_allclose(maxlog, 1.804465, rtol=1e-6)
def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)
# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog)
def test_all(self):
maxlog_all = stats.boxcox_normmax(self.x, method='all')
assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6)
@pytest.mark.parametrize("method", ["mle", "pearsonr", "all"])
@pytest.mark.parametrize("bounds", [(-1, 1), (1.1, 2), (-2, -1.1)])
def test_bounded_optimizer_within_bounds(self, method, bounds):
def optimizer(fun):
return optimize.minimize_scalar(fun, bounds=bounds,
method="bounded")
maxlog = stats.boxcox_normmax(self.x, method=method,
optimizer=optimizer)
assert np.all(bounds[0] < maxlog)
assert np.all(maxlog < bounds[1])
def test_user_defined_optimizer(self):
# tests an optimizer that is not based on scipy.optimize.minimize
lmbda = stats.boxcox_normmax(self.x)
lmbda_rounded = np.round(lmbda, 5)
lmbda_range = np.linspace(lmbda_rounded-0.01, lmbda_rounded+0.01, 1001)
class MyResult:
pass
def optimizer(fun):
# brute force minimum over the range
objs = []
for lmbda in lmbda_range:
objs.append(fun(lmbda))
res = MyResult()
res.x = lmbda_range[np.argmin(objs)]
return res
lmbda2 = stats.boxcox_normmax(self.x, optimizer=optimizer)
assert lmbda2 != lmbda # not identical
assert_allclose(lmbda2, lmbda, 1e-5) # but as close as it should be
def test_user_defined_optimizer_and_brack_raises_error(self):
optimizer = optimize.minimize_scalar
# Using default `brack=None` with user-defined `optimizer` works as
# expected.
stats.boxcox_normmax(self.x, brack=None, optimizer=optimizer)
# Using user-defined `brack` with user-defined `optimizer` is expected
# to throw an error. Instead, users should specify
# optimizer-specific parameters in the optimizer function itself.
with pytest.raises(ValueError, match="`brack` must be None if "
"`optimizer` is given"):
stats.boxcox_normmax(self.x, brack=(-2.0, 2.0),
optimizer=optimizer)
class TestBoxcoxNormplot:
def setup_method(self):
self.x = stats.loggamma.rvs(5, size=500, random_state=7654321) + 5
def test_basic(self):
N = 5
lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N)
ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057,
0.95843297]
assert_allclose(lmbdas, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
ax = fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=plt)
fig.delaxes(ax)
# Check that a Matplotlib Axes object is accepted
ax = fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `lb` has to be larger than `la`
assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0)
# `x` cannot contain negative values
assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1)
def test_empty(self):
assert_(stats.boxcox_normplot([], 0, 1).size == 0)
class TestYeojohnson_llf:
def test_array_like(self):
x = stats.norm.rvs(size=100, loc=0, random_state=54321)
lmbda = 1
llf = stats.yeojohnson_llf(lmbda, x)
llf2 = stats.yeojohnson_llf(lmbda, list(x))
assert_allclose(llf, llf2, rtol=1e-12)
def test_2d_input(self):
x = stats.norm.rvs(size=100, loc=10, random_state=54321)
lmbda = 1
llf = stats.yeojohnson_llf(lmbda, x)
llf2 = stats.yeojohnson_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12)
def test_empty(self):
assert_(np.isnan(stats.yeojohnson_llf(1, [])))
class TestYeojohnson:
def test_fixed_lmbda(self):
rng = np.random.RandomState(12345)
# Test positive input
x = stats.loggamma.rvs(5, size=50, random_state=rng) + 5
assert np.all(x > 0)
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt, x)
xt = stats.yeojohnson(x, lmbda=-1)
assert_allclose(xt, 1 - 1 / (x + 1))
xt = stats.yeojohnson(x, lmbda=0)
assert_allclose(xt, np.log(x + 1))
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt, x)
# Test negative input
x = stats.loggamma.rvs(5, size=50, random_state=rng) - 5
assert np.all(x < 0)
xt = stats.yeojohnson(x, lmbda=2)
assert_allclose(xt, -np.log(-x + 1))
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt, x)
xt = stats.yeojohnson(x, lmbda=3)
assert_allclose(xt, 1 / (-x + 1) - 1)
# test both positive and negative input
x = stats.loggamma.rvs(5, size=50, random_state=rng) - 2
assert not np.all(x < 0)
assert not np.all(x >= 0)
pos = x >= 0
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt[pos], x[pos])
xt = stats.yeojohnson(x, lmbda=-1)
assert_allclose(xt[pos], 1 - 1 / (x[pos] + 1))
xt = stats.yeojohnson(x, lmbda=0)
assert_allclose(xt[pos], np.log(x[pos] + 1))
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt[pos], x[pos])
neg = ~pos
xt = stats.yeojohnson(x, lmbda=2)
assert_allclose(xt[neg], -np.log(-x[neg] + 1))
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt[neg], x[neg])
xt = stats.yeojohnson(x, lmbda=3)
assert_allclose(xt[neg], 1 / (-x[neg] + 1) - 1)
@pytest.mark.parametrize('lmbda', [0, .1, .5, 2])
def test_lmbda_None(self, lmbda):
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
def _inverse_transform(x, lmbda):
x_inv = np.zeros(x.shape, dtype=x.dtype)
pos = x >= 0
# when x >= 0
if abs(lmbda) < np.spacing(1.):
x_inv[pos] = np.exp(x[pos]) - 1
else: # lmbda != 0
x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1
# when x < 0
if abs(lmbda - 2) > np.spacing(1.):
x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1,
1 / (2 - lmbda))
else: # lmbda == 2
x_inv[~pos] = 1 - np.exp(-x[~pos])
return x_inv
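        # For reference, the forward Yeo-Johnson transform inverted above is:
        #   x >= 0:  ((x + 1)**lmbda - 1) / lmbda    (log(x + 1) if lmbda == 0)
        #   x <  0:  -((-x + 1)**(2 - lmbda) - 1) / (2 - lmbda)
        #            (-log(-x + 1) if lmbda == 2)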
n_samples = 20000
np.random.seed(1234567)
x = np.random.normal(loc=0, scale=1, size=(n_samples))
x_inv = _inverse_transform(x, lmbda)
xt, maxlog = stats.yeojohnson(x_inv)
assert_allclose(maxlog, lmbda, atol=1e-2)
assert_almost_equal(0, np.linalg.norm(x - xt) / n_samples, decimal=2)
assert_almost_equal(0, xt.mean(), decimal=1)
assert_almost_equal(1, xt.std(), decimal=1)
def test_empty(self):
assert_(stats.yeojohnson([]).shape == (0,))
def test_array_like(self):
x = stats.norm.rvs(size=100, loc=0, random_state=54321)
xt1, _ = stats.yeojohnson(x)
xt2, _ = stats.yeojohnson(list(x))
assert_allclose(xt1, xt2, rtol=1e-12)
@pytest.mark.parametrize('dtype', [np.complex64, np.complex128])
def test_input_dtype_complex(self, dtype):
x = np.arange(6, dtype=dtype)
err_msg = ('Yeo-Johnson transformation is not defined for complex '
'numbers.')
with pytest.raises(ValueError, match=err_msg):
stats.yeojohnson(x)
@pytest.mark.parametrize('dtype', [np.int8, np.uint8, np.int16, np.int32])
def test_input_dtype_integer(self, dtype):
x_int = np.arange(8, dtype=dtype)
x_float = np.arange(8, dtype=np.float64)
xt_int, lmbda_int = stats.yeojohnson(x_int)
xt_float, lmbda_float = stats.yeojohnson(x_float)
assert_allclose(xt_int, xt_float, rtol=1e-7)
assert_allclose(lmbda_int, lmbda_float, rtol=1e-7)
class TestYeojohnsonNormmax:
def setup_method(self):
self.x = stats.loggamma.rvs(5, size=50, random_state=12345) + 5
def test_mle(self):
maxlog = stats.yeojohnson_normmax(self.x)
assert_allclose(maxlog, 1.876393, rtol=1e-6)
def test_darwin_example(self):
# test from original paper "A new family of power transformations to
# improve normality or symmetry" by Yeo and Johnson.
x = [6.1, -8.4, 1.0, 2.0, 0.7, 2.9, 3.5, 5.1, 1.8, 3.6, 7.0, 3.0, 9.3,
7.5, -6.0]
lmbda = stats.yeojohnson_normmax(x)
assert np.allclose(lmbda, 1.305, atol=1e-3)
class TestCircFuncs:
@pytest.mark.parametrize("test_func,expected",
[(stats.circmean, 0.167690146),
(stats.circvar, 42.51955609),
(stats.circstd, 6.520702116)])
def test_circfuncs(self, test_func, expected):
x = np.array([355, 5, 2, 359, 10, 350])
assert_allclose(test_func(x, high=360), expected, rtol=1e-7)
def test_circfuncs_small(self):
x = np.array([20, 21, 22, 18, 19, 20.5, 19.2])
M1 = x.mean()
M2 = stats.circmean(x, high=360)
assert_allclose(M2, M1, rtol=1e-5)
V1 = x.var()
V2 = stats.circvar(x, high=360)
assert_allclose(V2, V1, rtol=1e-4)
S1 = x.std()
S2 = stats.circstd(x, high=360)
assert_allclose(S2, S1, rtol=1e-4)
@pytest.mark.parametrize("test_func, numpy_func",
[(stats.circmean, np.mean),
(stats.circvar, np.var),
(stats.circstd, np.std)])
def test_circfuncs_close(self, test_func, numpy_func):
# circfuncs should handle very similar inputs (gh-12740)
x = np.array([0.12675364631578953] * 10 + [0.12675365920187928] * 100)
circstat = test_func(x)
normal = numpy_func(x)
assert_allclose(circstat, normal, atol=1e-8)
def test_circmean_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
M1 = stats.circmean(x, high=360)
M2 = stats.circmean(x.ravel(), high=360)
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=1)
M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=0)
M2 = [stats.circmean(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(M1, M2, rtol=1e-14)
def test_circvar_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
V1 = stats.circvar(x, high=360)
V2 = stats.circvar(x.ravel(), high=360)
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=1)
V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=0)
V2 = [stats.circvar(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(V1, V2, rtol=1e-11)
def test_circstd_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
S1 = stats.circstd(x, high=360)
S2 = stats.circstd(x.ravel(), high=360)
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=1)
S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=0)
S2 = [stats.circstd(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(S1, S2, rtol=1e-11)
@pytest.mark.parametrize("test_func,expected",
[(stats.circmean, 0.167690146),
(stats.circvar, 42.51955609),
(stats.circstd, 6.520702116)])
def test_circfuncs_array_like(self, test_func, expected):
x = [355, 5, 2, 359, 10, 350]
assert_allclose(test_func(x, high=360), expected, rtol=1e-7)
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
stats.circstd])
def test_empty(self, test_func):
assert_(np.isnan(test_func([])))
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
stats.circstd])
def test_nan_propagate(self, test_func):
x = [355, 5, 2, 359, 10, 350, np.nan]
assert_(np.isnan(test_func(x, high=360)))
@pytest.mark.parametrize("test_func,expected",
[(stats.circmean,
{None: np.nan, 0: 355.66582264, 1: 0.28725053}),
(stats.circvar,
{None: np.nan, 0: 16.89976130, 1: 36.51366669}),
(stats.circstd,
{None: np.nan, 0: 4.11093193, 1: 6.04265394})])
def test_nan_propagate_array(self, test_func, expected):
x = np.array([[355, 5, 2, 359, 10, 350, 1],
[351, 7, 4, 352, 9, 349, np.nan],
[1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]])
for axis in expected.keys():
out = test_func(x, high=360, axis=axis)
if axis is None:
assert_(np.isnan(out))
else:
assert_allclose(out[0], expected[axis], rtol=1e-7)
assert_(np.isnan(out[1:]).all())
@pytest.mark.parametrize("test_func,expected",
[(stats.circmean,
{None: 359.4178026893944,
0: np.array([353.0, 6.0, 3.0, 355.5, 9.5,
349.5]),
1: np.array([0.16769015, 358.66510252])}),
(stats.circvar,
{None: 55.362093503276725,
0: np.array([4.00081258, 1.00005077, 1.00005077,
12.25762620, 0.25000317,
0.25000317]),
1: np.array([42.51955609, 67.09872148])}),
(stats.circstd,
{None: 7.440570778057074,
0: np.array([2.00020313, 1.00002539, 1.00002539,
3.50108929, 0.50000317,
0.50000317]),
1: np.array([6.52070212, 8.19138093])})])
def test_nan_omit_array(self, test_func, expected):
x = np.array([[355, 5, 2, 359, 10, 350, np.nan],
[351, 7, 4, 352, 9, 349, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]])
for axis in expected.keys():
out = test_func(x, high=360, nan_policy='omit', axis=axis)
if axis is None:
assert_allclose(out, expected[axis], rtol=1e-7)
else:
assert_allclose(out[:-1], expected[axis], rtol=1e-7)
assert_(np.isnan(out[-1]))
@pytest.mark.parametrize("test_func,expected",
[(stats.circmean, 0.167690146),
(stats.circvar, 42.51955609),
(stats.circstd, 6.520702116)])
def test_nan_omit(self, test_func, expected):
x = [355, 5, 2, 359, 10, 350, np.nan]
assert_allclose(test_func(x, high=360, nan_policy='omit'),
expected, rtol=1e-7)
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
stats.circstd])
def test_nan_omit_all(self, test_func):
x = [np.nan, np.nan, np.nan, np.nan, np.nan]
assert_(np.isnan(test_func(x, nan_policy='omit')))
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
stats.circstd])
def test_nan_omit_all_axis(self, test_func):
x = np.array([[np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan]])
out = test_func(x, nan_policy='omit', axis=1)
assert_(np.isnan(out).all())
assert_(len(out) == 2)
@pytest.mark.parametrize("x",
[[355, 5, 2, 359, 10, 350, np.nan],
np.array([[355, 5, 2, 359, 10, 350, np.nan],
[351, 7, 4, 352, np.nan, 9, 349]])])
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
stats.circstd])
def test_nan_raise(self, test_func, x):
assert_raises(ValueError, test_func, x, high=360, nan_policy='raise')
@pytest.mark.parametrize("x",
[[355, 5, 2, 359, 10, 350, np.nan],
np.array([[355, 5, 2, 359, 10, 350, np.nan],
[351, 7, 4, 352, np.nan, 9, 349]])])
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
stats.circstd])
def test_bad_nan_policy(self, test_func, x):
assert_raises(ValueError, test_func, x, high=360, nan_policy='foobar')
def test_circmean_scalar(self):
x = 1.
M1 = x
M2 = stats.circmean(x)
assert_allclose(M2, M1, rtol=1e-5)
def test_circmean_range(self):
# regression test for gh-6420: circmean(..., high, low) must be
# between `high` and `low`
m = stats.circmean(np.arange(0, 2, 0.1), np.pi, -np.pi)
assert_(m < np.pi)
assert_(m > -np.pi)
    def test_circfuncs_uint8(self):
# regression test for gh-7255: overflow when working with
# numpy uint8 data type
x = np.array([150, 10], dtype='uint8')
assert_equal(stats.circmean(x, high=180), 170.0)
assert_allclose(stats.circvar(x, high=180), 437.45871686, rtol=1e-7)
assert_allclose(stats.circstd(x, high=180), 20.91551378, rtol=1e-7)
class TestMedianTest:
def test_bad_n_samples(self):
# median_test requires at least two samples.
assert_raises(ValueError, stats.median_test, [1, 2, 3])
def test_empty_sample(self):
# Each sample must contain at least one value.
assert_raises(ValueError, stats.median_test, [], [1, 2, 3])
def test_empty_when_ties_ignored(self):
# The grand median is 1, and all values in the first argument are
# equal to the grand median. With ties="ignore", those values are
# ignored, which results in the first sample being (in effect) empty.
# This should raise a ValueError.
assert_raises(ValueError, stats.median_test,
[1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore")
def test_empty_contingency_row(self):
# The grand median is 1, and with the default ties="below", all the
# values in the samples are counted as being below the grand median.
        # This would result in a row of zeros in the contingency table,
        # which is an error.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1])
# With ties="above", all the values are counted as above the
# grand median.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1],
ties="above")
def test_bad_ties(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5],
ties="foo")
def test_bad_nan_policy(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5], nan_policy='foobar')
def test_bad_keyword(self):
assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5],
foo="foo")
def test_simple(self):
x = [1, 2, 3]
y = [1, 2, 3]
stat, p, med, tbl = stats.median_test(x, y)
# The median is floating point, but this equality test should be safe.
assert_equal(med, 2.0)
assert_array_equal(tbl, [[1, 1], [2, 2]])
# The expected values of the contingency table equal the contingency
# table, so the statistic should be 0 and the p-value should be 1.
assert_equal(stat, 0)
assert_equal(p, 1)
def test_ties_options(self):
# Test the contingency table calculation.
x = [1, 2, 3, 4]
y = [5, 6]
z = [7, 8, 9]
# grand median is 5.
# Default 'ties' option is "below".
stat, p, m, tbl = stats.median_test(x, y, z)
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 1, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="ignore")
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 0, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="above")
assert_equal(m, 5)
assert_equal(tbl, [[0, 2, 3], [4, 0, 0]])
def test_nan_policy_options(self):
x = [1, 2, np.nan]
y = [4, 5, 6]
mt1 = stats.median_test(x, y, nan_policy='propagate')
s, p, m, t = stats.median_test(x, y, nan_policy='omit')
assert_equal(mt1, (np.nan, np.nan, np.nan, None))
assert_allclose(s, 0.31250000000000006)
assert_allclose(p, 0.57615012203057869)
assert_equal(m, 4.0)
assert_equal(t, np.array([[0, 2],[2, 1]]))
assert_raises(ValueError, stats.median_test, x, y, nan_policy='raise')
def test_basic(self):
# median_test calls chi2_contingency to compute the test statistic
# and p-value. Make sure it hasn't screwed up the call...
x = [1, 2, 3, 4, 5]
y = [2, 4, 6, 8]
stat, p, m, tbl = stats.median_test(x, y)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, lambda_=0)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, lambda_=0)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, correction=False)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, correction=False)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
| bsd-3-clause |
kylerbrown/scikit-learn | sklearn/tests/test_multiclass.py | 136 | 23649 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
with ignore_warnings():
assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
LabelBinarizer(), [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote
        # levels because there are only 3 distinct class pairs and thus 3
        # distinct binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
    # test that ties can be won by labels other than the first two
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
base_estimator = DecisionTreeClassifier(random_state=0)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
all_metas = [
(OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
(OneVsOneClassifier, fit_ovo, predict_ovo, None),
(OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
]
for MetaEst, fit_func, predict_func, proba_func in all_metas:
try:
meta_est = MetaEst(base_estimator,
random_state=0).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train,
random_state=0)
except TypeError:
meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train)
if len(fitted_return) == 2:
estimators_, classes_or_lb = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
X_test),
meta_est.predict(X_test))
if proba_func is not None:
assert_almost_equal(proba_func(estimators_, X_test,
is_multilabel=False),
meta_est.predict_proba(X_test))
else:
estimators_, classes_or_lb, codebook = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
codebook, X_test),
meta_est.predict(X_test))
| bsd-3-clause |
czhengsci/pymatgen | setup.py | 1 | 8355 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import sys
import platform
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext as _build_ext
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
if sys.version_info[0] >= 3:
import builtins
if hasattr(builtins, '__NUMPY_SETUP__'):
del builtins.__NUMPY_SETUP__
import importlib
import numpy
importlib.reload(numpy)
else:
import __builtin__
if hasattr(__builtin__, '__NUMPY_SETUP__'):
del __builtin__.__NUMPY_SETUP__
import imp
import numpy
imp.reload(numpy)
self.include_dirs.append(numpy.get_include())
extra_link_args = []
if sys.platform.startswith('win') and platform.machine().endswith('64'):
extra_link_args.append('-Wl,--allow-multiple-definition')
long_desc = """
Official docs: `http://pymatgen.org <http://pymatgen.org/>`_
Pymatgen (Python Materials Genomics) is a robust, open-source Python library
for materials analysis. These are some of the main features:
1. Highly flexible classes for the representation of Element, Site, Molecule,
Structure objects.
2. Extensive input/output support, including support for VASP
(http://cms.mpi.univie.ac.at/vasp/), ABINIT (http://www.abinit.org/), CIF,
Gaussian, XYZ, and many other file formats.
3. Powerful analysis tools, including generation of phase diagrams, Pourbaix
diagrams, diffusion analyses, reactions, etc.
4. Electronic structure analyses, such as density of states and band structure.
5. Integration with the Materials Project REST API.
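
As a minimal illustration of the core API (a sketch only -- the lattice
constant, species and file name below are arbitrary placeholders)::

    from pymatgen import Lattice, Structure

    lattice = Lattice.cubic(4.2)
    structure = Structure(lattice, ["Cs", "Cl"],
                          [[0, 0, 0], [0.5, 0.5, 0.5]])
    structure.to(filename="CsCl.cif")  # write the structure out as CIF
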
Pymatgen is free to use. However, we also welcome your help to improve this
library by making your own contributions. These contributions can be in the
form of additional tools or modules you develop, or feature requests and bug
reports. Please report any bugs and issues at pymatgen's `Github page
<https://github.com/materialsproject/pymatgen>`_. If you wish to be notified
of pymatgen releases, you may become a member of `pymatgen's Google Groups page
<https://groups.google.com/forum/?fromgroups#!forum/pymatgen/>`_.
Why use pymatgen?
=================
There are many materials analysis codes out there, both commercial and free,
but pymatgen offers several advantages:
1. **It is (fairly) robust.** Pymatgen is used by thousands of researchers,
and is the analysis code powering the `Materials Project`_. The analysis it
produces survives rigorous scrutiny every single day. Bugs tend to be
found and corrected quickly. Pymatgen also uses
`CircleCI <https://circleci.com>`_ and `Appveyor <https://www.appveyor.com/>`_
for continuous integration on the Linux and Windows platforms,
respectively, which ensures that every commit passes a comprehensive suite
of unittests. The coverage of the unittests can be seen at
`here <coverage/index.html>`_.
2. **It is well documented.** A fairly comprehensive documentation has been
written to help you get to grips with it quickly.
3. **It is open.** You are free to use and contribute to pymatgen. It also means
that pymatgen is continuously being improved. We will attribute any code you
contribute to any publication you specify. Contributing to pymatgen means
your research becomes more visible, which translates to greater impact.
4. **It is fast.** Many of the core numerical methods in pymatgen have been
optimized by vectorizing in numpy/scipy. This means that coordinate
manipulations are extremely fast and are in fact comparable to codes
written in other languages. Pymatgen also comes with a complete system for
handling periodic boundary conditions.
5. **It will be around.** Pymatgen is not a pet research project. It is used in
the well-established Materials Project. It is also actively being developed
and maintained by the `Materials Virtual Lab`_, the ABINIT group and many
other research groups.
With effect from version 3.0, pymatgen supports both Python 2.7 and
Python 3.x.
"""
setup(
name="pymatgen",
packages=find_packages(),
version="2018.3.14",
cmdclass={'build_ext': build_ext},
setup_requires=['numpy', 'setuptools>=18.0'],
install_requires=["numpy>=1.9", "six", "requests", "ruamel.yaml>=0.15.6",
"monty>=0.9.6", "scipy>=1.0.0", "pydispatcher>=2.0.5",
"tabulate", "spglib>=1.9.9.44",
"matplotlib>=1.5", "palettable>=2.1.1", "sympy", "pandas"],
extras_require={
':python_version == "2.7"': [
'enum34',
],
"provenance": ["pybtex"],
"ase": ["ase>=3.3"],
"vis": ["vtk>=6.0.0"],
"abinit": ["apscheduler==2.1.0"]},
package_data={"pymatgen.core": ["*.json"],
"pymatgen.analysis": ["*.yaml", "*.json"],
"pymatgen.analysis.chemenv.coordination_environments.coordination_geometries_files": ["*.txt", "*.json"],
"pymatgen.analysis.chemenv.coordination_environments.strategy_files": ["*.json"],
"pymatgen.io.vasp": ["*.yaml"],
"pymatgen.io.feff": ["*.yaml"],
"pymatgen.symmetry": ["*.yaml", "*.json"],
"pymatgen.entries": ["*.yaml"],
"pymatgen.structure_prediction": ["data/*.json"],
"pymatgen.vis": ["ElementColorSchemes.yaml"],
"pymatgen.command_line": ["OxideTersoffPotentials"],
"pymatgen.analysis.defects": ["*.json"],
"pymatgen.analysis.diffraction": ["*.json"],
"pymatgen.util": ["structures/*.json"]},
author="Pymatgen Development Team",
author_email="[email protected]",
maintainer="Shyue Ping Ong",
maintainer_email="[email protected]",
url="http://www.pymatgen.org",
license="MIT",
description="Python Materials Genomics is a robust materials "
"analysis code that defines core object representations for "
"structures and molecules with support for many electronic "
"structure codes. It is currently the core analysis code "
"powering the Materials Project "
"(https://www.materialsproject.org).",
long_description=long_desc,
keywords=["VASP", "gaussian", "ABINIT", "nwchem", "materials", "project",
"electronic", "structure", "analysis", "phase", "diagrams"],
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Chemistry",
"Topic :: Software Development :: Libraries :: Python Modules"
],
ext_modules=[Extension("pymatgen.optimization.linear_assignment",
["pymatgen/optimization/linear_assignment.c"],
extra_link_args=extra_link_args),
Extension("pymatgen.util.coord_cython",
["pymatgen/util/coord_cython.c"],
extra_link_args=extra_link_args)],
entry_points={
'console_scripts': [
'pmg = pymatgen.cli.pmg:main',
'feff_input_generation = pymatgen.cli.feff_input_generation:main',
'feff_plot_cross_section = pymatgen.cli.feff_plot_cross_section:main',
'feff_plot_dos = pymatgen.cli.feff_plot_dos:main',
'gaussian_analyzer = pymatgen.cli.gaussian_analyzer:main',
'get_environment = pymatgen.cli.get_environment:main',
'pydii = pymatgen.cli.pydii:main',
]
}
) | mit |
jcrist/pydy | examples/language_templates/numpy_scipy.py | 7 | 1316 | # How to integrate equations of motion, quick and dirty way
# Note: this template will not run as is
# get EoM into form <qdots, udots = expressions> first though
# Also, make sure there are no qdots in rhs of udots
# (meaning udot = f(q, u, t), not f(q, qdot, u, t))
# use Kane.kindiffdict to get dictionary, and use subs on your udot vector to
# get rid of qdots in bad places. See examples
from numpy import array, linspace, sin, cos # etc.
import matplotlib.pyplot as plt
from scipy.integrate import odeint
def rhs(y, t, arg1, arg2, etc.):
# unpack variables; remember the order here, it is important
var1, var2, etc. = y
# write return statement
# copy/paste qdots, udots from console/terminal
# make sure mechanics printing was turned on first though
return array([var1dot, var2dot, var3dot, etc.])
# make tuple of arguement values (as floats or something)
args = (arg1, arg2, etc.)
# give initial conditions
y0 = []
# choose a timespan
t = linspace(0, 1)
# call odeint
y = odeint(rhs, y0, t, args)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(t, y)
# give title to plot
ax.set_title('My Problem')
# give x axis label
ax.set_xlabel('Time (s)')
# give y axis label
ax.set_ylabel('Qs and Us')
# set legend values
ax.legend(['var1', 'var2', 'var3', etc.])
# show plot
plt.show()
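# ---------------------------------------------------------------------------
# A concrete, runnable sketch of the recipe above (not part of the original
# template). It integrates a damped pendulum,
# theta'' = -(g/L)*sin(theta) - c*theta', with arbitrary illustrative
# parameter values.
from numpy import array, linspace, sin
from scipy.integrate import odeint
import matplotlib.pyplot as plt

def pendulum_rhs(y, t, g, L, c):
    # unpack the state: angle and angular rate
    theta, omega = y
    return array([omega, -(g / L) * sin(theta) - c * omega])

pend_args = (9.81, 1.0, 0.1)      # g (m/s^2), L (m), damping coefficient c
pend_y0 = [1.0, 0.0]              # initial angle (rad), initial rate (rad/s)
pend_t = linspace(0, 10, 500)
pend_y = odeint(pendulum_rhs, pend_y0, pend_t, pend_args)

fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.plot(pend_t, pend_y)
ax2.set_title('Damped pendulum')
ax2.set_xlabel('Time (s)')
ax2.set_ylabel('State')
ax2.legend(['theta', 'omega'])
plt.show()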
| bsd-3-clause |
MichielCottaar/pymc3 | pymc3/tests/test_plots.py | 13 | 1721 | import matplotlib
matplotlib.use('Agg', warn=False)
import numpy as np
from .checks import close_to
import pymc3.plots
from pymc3.plots import *
from pymc3 import Slice, Metropolis, find_hessian, sample
def test_plots():
# Test single trace
from pymc3.examples import arbitrary_stochastic as asmod
with asmod.model as model:
start = model.test_point
h = find_hessian(start)
step = Metropolis(model.vars, h)
trace = sample(3000, step, start)
traceplot(trace)
forestplot(trace)
autocorrplot(trace)
def test_plots_multidimensional():
# Test single trace
from .models import multidimensional_model
start, model, _ = multidimensional_model()
with model as model:
h = np.diag(find_hessian(start))
step = Metropolis(model.vars, h)
trace = sample(3000, step, start)
traceplot(trace)
#forestplot(trace)
#autocorrplot(trace)
def test_multichain_plots():
from pymc3.examples import disaster_model as dm
with dm.model as model:
# Run sampler
step1 = Slice([dm.early_mean, dm.late_mean])
step2 = Metropolis([dm.switchpoint])
start = {'early_mean': 2., 'late_mean': 3., 'switchpoint': 50}
ptrace = sample(1000, [step1, step2], start, njobs=2)
forestplot(ptrace, vars=['early_mean', 'late_mean'])
autocorrplot(ptrace, vars=['switchpoint'])
def test_make_2d():
a = np.arange(4)
close_to(pymc3.plots.make_2d(a), a[:,None], 0)
n = 7
a = np.arange(n*4*5).reshape((n,4,5))
res = pymc3.plots.make_2d(a)
assert res.shape == (n,20)
close_to(a[:,0,0], res[:,0], 0)
close_to(a[:,3,2], res[:,2*4+3], 0)
| apache-2.0 |
kedz/cuttsum | trec2014/python/cuttsum/salience.py | 1 | 18804 | import os
import re
import gzip
import pandas as pd
from cuttsum.data import get_resource_manager, MultiProcessWorker
from cuttsum.misc import ProgressBar
import random
import GPy
import numpy as np
from collections import defaultdict
import multiprocessing
import signal
import sys
import Queue
from sklearn.preprocessing import StandardScaler
from sklearn.externals import joblib
from scipy import linalg
class SalienceModels(MultiProcessWorker):
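    # Manages training of Gaussian-process salience regressors for an event:
    # `train_models` samples sentence features and nugget similarities, fits
    # n_samples GPy GPRegression models (one per random sample of sentences;
    # see salience_train_worker_), and joblib-dumps each
    # (x-scaler, y-scaler, model) triple under $TREC_DATA/salience-models.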
def __init__(self):
self.dir_ = os.path.join(
os.getenv(u'TREC_DATA', u'.'), u'salience-models')
if not os.path.exists(self.dir_):
os.makedirs(self.dir_)
def get_model_dir(self, event, feature_set, prefix):
return os.path.join(self.dir_, prefix + '.' + feature_set.fs_name(),
event.fs_name())
def get_model_paths(self, event, feature_set, prefix, n_samples):
model_dir = self.get_model_dir(event, feature_set, prefix)
return [os.path.join(model_dir, 'model_{}.pkl'.format(ver))
for ver in xrange(n_samples)]
def check_coverage(self, event, corpus,
feature_set, prefix, n_samples=10, **kwargs):
model_dir = self.get_model_dir(event, feature_set, prefix)
if n_samples <= 0:
return 0
if not os.path.exists(model_dir):
return 0
n_covered = 0
for model_path in self.get_model_paths(
event, feature_set, prefix, n_samples):
if os.path.exists(model_path):
n_covered += 1
return n_covered / float(n_samples)
def train_models(self, event, corpus, feature_set, prefix, n_procs=1,
progress_bar=False, random_seed=42,
n_samples=10, sample_size=100, **kwargs):
model_paths = self.get_model_paths(
event, feature_set, prefix, n_samples)
model_dir = self.get_model_dir(event, feature_set, prefix)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
jobs = []
for ver, model_path in enumerate(model_paths):
jobs.append((model_path, random_seed + ver))
self.do_work(salience_train_worker_, jobs, n_procs,
progress_bar, event=event,
corpus=corpus, feature_set=feature_set,
sample_size=sample_size)
def salience_train_worker_(job_queue, result_queue, **kwargs):
signal.signal(signal.SIGINT, signal.SIG_IGN)
corpus = kwargs.get(u'corpus')
event = kwargs.get(u'event')
feature_set = kwargs.get(u'feature_set')
sample_size = kwargs.get(u'sample_size')
while not job_queue.empty():
try:
model_path, random_seed = job_queue.get(block=False)
feats_df, sims_df = load_all_feats(
event, feature_set, sample_size, random_seed)
#sims_df = load_all_sims(event)
n_points = len(feats_df)
assert n_points == len(sims_df)
assert n_points == sample_size
fgroups = get_group_indices(feats_df)
Y = []
X = []
for i in xrange(sample_size):
assert feats_df.iloc[i][u'stream id'] == \
sims_df.iloc[i][u'stream id']
assert feats_df.iloc[i][u'sentence id'] == \
sims_df.iloc[i][u'sentence id']
Y.append(sims_df.iloc[i].values[2:])
X.append(feats_df.iloc[i].values[2:])
Y = np.array(Y, dtype=np.float64)
#y = y[:, np.newaxis]
X = np.array(X, dtype=np.float64)
X += 1.e-2 * np.random.normal(0, 1, X.shape)
#Xma = np.ma.masked_array(X, np.isnan(X))
xrescaler = StandardScaler()
yrescaler = StandardScaler()
X = xrescaler.fit_transform(X)
bad_cols = set(np.where(~X.any(axis=0))[0]) | \
set(np.where(np.all(X == X[0,:], axis=0))[0])
#print bad_cols
#if len(bad_cols) > 0:
# result_queue.put(None)
# return
Y = yrescaler.fit_transform(Y)
y = np.max(Y, axis=1)
y = y[:, np.newaxis]
kern_comb = None
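            # Build an additive kernel: for each feature group, an ARD RBF
            # over that group's non-degenerate dimensions plus a white-noise
            # term on the same dimensions.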
for key, indices in fgroups.items():
good_indices = set(indices) - bad_cols
indices = sorted(list(good_indices))
kern = GPy.kern.RBF(input_dim=len(indices),
active_dims=indices,
ARD=True)
if kern_comb is None:
kern_comb = kern
else:
kern_comb += kern
kern_comb += GPy.kern.White(input_dim=len(indices),
active_dims=indices)
try:
m = GPy.models.GPRegression(X, y, kern_comb)
m.unconstrain('')
m.constrain_positive('')
#m['.*white'].constrain_fixed(1.)
m.optimize_restarts(
num_restarts=10, robust=False, verbose=False,
parallel=False, num_processes=1, max_iters=20)
joblib.dump((xrescaler, yrescaler, m), model_path)
except linalg.LinAlgError, e:
print e
#print X
#print y
result_queue.put(None)
except Queue.Empty:
pass
def load_all_feats(event, fs, sample_size, random_seed):
random.seed(random_seed)
all_df = None
features = get_resource_manager(u'SentenceFeaturesResource')
fregex = fs.get_feature_regex() + "|stream id|sentence id"
resevoir = None
current_index = 0
hours = []
res_sims = None
for hour in event.list_event_hours():
path = features.get_tsv_path(event, hour)
#print path, current_index
if not os.path.exists(path):
continue
with gzip.open(path, u'r') as f:
df = pd.io.parsers.read_csv(
f, sep='\t', quoting=3, header=0)
df = df.filter(regex=fregex)
if resevoir is None:
resevoir = pd.DataFrame(columns=df.columns)
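            # Reservoir sampling: keep a uniform random sample of at most
            # sample_size rows over all rows seen so far (across all hours).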
for _, row in df.iterrows():
if current_index < sample_size:
resevoir.loc[current_index] = row
hours.append(hour)
else:
r = random.randint(0, current_index)
if r < sample_size:
resevoir.loc[r] = row
hours[r] = hour
current_index += 1
# if resevoir is None:
# resevoir = df.iloc[range(0, sample_size)]
# current_index = sample_size
# paths = [path for i in range(0, sample_size)]
s = get_resource_manager(u'NuggetSimilaritiesResource')
for hour in set(hours):
path = s.get_tsv_path(event, hour)
with gzip.open(path, u'r') as f:
df = pd.io.parsers.read_csv(
f, sep='\t', quoting=3, header=0)
if res_sims is None:
res_sims = pd.DataFrame([{} for x in range(sample_size)],
columns=df.columns)
for idx, row_hour in enumerate(hours):
if hour != row_hour:
continue
stream_id = resevoir.iloc[idx][u'stream id']
sent_id = resevoir.iloc[idx][u'sentence id']
res_sims.loc[idx] = df.loc[
(df[u'stream id'] == stream_id) & \
(df[u'sentence id'] == sent_id)].iloc[0]
for i in range(sample_size):
assert resevoir[u'sentence id'].iloc[i] == \
res_sims[u'sentence id'].iloc[i]
assert resevoir[u'stream id'].iloc[i] == \
res_sims[u'stream id'].iloc[i]
return resevoir, res_sims
def get_group_indices(feat_df):
idxmap = defaultdict(list)
for idx, feat in enumerate(feat_df.columns[2:]):
idxmap[feat.split('_')[0]].append(idx)
for feat in idxmap.keys():
idxmap[feat].sort()
# start = min(idxmap[feat])
# end = max(idxmap[feat]) + 1
# idxmap[feat] = (start, end)
return idxmap
class SaliencePredictions(MultiProcessWorker):
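    # Applies previously trained salience models to each event hour's
    # sentence features (see salience_predict_worker_) and writes one gzipped
    # TSV of predicted salience per (hour, model) under
    # $TREC_DATA/salience-predictions.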
def __init__(self):
self.dir_ = os.path.join(
os.getenv(u'TREC_DATA', u'.'), u'salience-predictions')
if not os.path.exists(self.dir_):
os.makedirs(self.dir_)
def get_tsv_dir(self, event, prefix, feature_set):
data_dir = os.path.join(self.dir_,
prefix + "." + feature_set.fs_name(),
event.fs_name())
return data_dir
def get_tsv_path(self, event, hour, prefix, feature_set, model_path):
parent_dir, model_num = os.path.split(model_path)
model_name = os.path.split(parent_dir)[-1]
key = model_name + "." + model_num
data_dir = self.get_tsv_dir(event, prefix, feature_set)
return os.path.join(data_dir, u'{}.{}.tsv.gz'.format(
hour.strftime(u'%Y-%m-%d-%H'), key))
def check_coverage(self, event, corpus,
feature_set, prefix, model_events, n_samples=100,
**kwargs):
feats = get_resource_manager(u'SentenceFeaturesResource')
n_feats = 0
n_covered = 0
sm = SalienceModels()
model_paths = []
for model_event in model_events:
model_paths.extend(
sm.get_model_paths(
model_event, feature_set, prefix, n_samples))
for hour in event.list_event_hours():
for model_path in model_paths:
feat_tsv_path = feats.get_tsv_path(event, hour)
sal_tsv_path = self.get_tsv_path(
event, hour, prefix, feature_set, model_path)
if os.path.exists(feat_tsv_path):
n_feats += 1
if os.path.exists(sal_tsv_path):
n_covered += 1
if n_feats == 0:
return 0
return n_covered / float(n_feats)
def predict_salience(self, event, corpus, feature_set,
prefix, model_events, n_procs=1, n_samples=10,
progress_bar=False, **kwargs):
data_dir = self.get_tsv_dir(event, prefix, feature_set)
if not os.path.exists(data_dir):
os.makedirs(data_dir)
feats = get_resource_manager(u'SentenceFeaturesResource')
sm = SalienceModels()
model_paths = []
for model_event in model_events:
model_paths.extend(
sm.get_model_paths(
model_event, feature_set, prefix, n_samples))
jobs = []
# for hour in event.list_event_hours():
for model_path in model_paths:
# feat_tsv_path = feats.get_tsv_path(event, hour)
# sal_tsv_path = self.get_tsv_path(
# event, hour, prefix, feature_set, model_path)
#if os.path.exists(feat_tsv_path):
jobs.append(model_path)
self.do_work(salience_predict_worker_, jobs, n_procs,
progress_bar, event=event,
feature_set=feature_set, prefix=prefix)
def salience_predict_worker_(job_queue, result_queue, **kwargs):
signal.signal(signal.SIGINT, signal.SIG_IGN)
event = kwargs.get(u'event')
fs = kwargs.get(u'feature_set')
prefix = kwargs.get(u'prefix')
feats = get_resource_manager(u'SentenceFeaturesResource')
sp = SaliencePredictions()
# model_paths = kwargs.get(u'model_paths')
# model_paths.sort()
# model_keys = []
# key2models = {}
# n_models = len(model_paths)
# for i, model_path in enumerate(model_paths, 1):
# print i, "/", n_models
#if not os.path.exists(model_path):
# continue
# parent_dir, model_num = os.path.split(model_path)
# model_name = os.path.split(parent_dir)[-1]
# key = model_name + "." + model_num
# key2models[key] = joblib.load(model_path)
# model_keys.append(key)
# n_model_paths = len(model_paths)
fregex = fs.get_feature_regex() + "|stream id|sentence id"
while not job_queue.empty():
try:
model_path = job_queue.get(block=False)
parent_dir, model_num = os.path.split(model_path)
model_name = os.path.split(parent_dir)[-1]
model_key = model_name + "." + model_num
xrescaler, yrescaler, m = joblib.load(model_path)
for hour in event.list_event_hours():
feat_tsv_path = feats.get_tsv_path(event, hour)
sal_tsv_path = sp.get_tsv_path(
event, hour, prefix, fs, model_path)
if not os.path.exists(feat_tsv_path):
continue
if os.path.exists(sal_tsv_path):
continue
with gzip.open(feat_tsv_path, u'r') as f:
feats_df = pd.io.parsers.read_csv(
f, sep='\t', quoting=3, header=0)
feats_df = feats_df.filter(regex=fregex)
feats_df = feats_df.sort([u'stream id', u'sentence id'])
n_points = len(feats_df)
sims = []
X = []
for i in xrange(n_points):
streamid = feats_df.iloc[i][u'stream id']
sentid = feats_df.iloc[i][u'sentence id']
x = feats_df.iloc[i].values[2:]
#print len(feats_df.columns), len(x), x
X.append(x)
sims.append({u'stream id': streamid, u'sentence id': sentid})
X = np.array(X, dtype=np.float64)
# print X.shape
#for model_key, (xrescaler, yrescaler, m) in key2models.items():
# print model_key
#for model_path in model_paths:
# parent_dir, model_num = os.path.split(model_path)
# model_name = os.path.split(parent_dir)[-1]
# model_key = model_name + "." + model_num
#if not os.path.exists(model_path):
# continue
#print model_path
#xrescaler, yrescaler, m = joblib.load(model_path)
#print xrescaler.mean_.shape
Xscale = xrescaler.transform(X)
result = m.predict(Xscale)
#print len(result)
##print result[0].shape
yp = result[0]
for i, y in enumerate(yp):
sims[i][model_key] = y[0]
sims_df = pd.DataFrame(
sims, columns=[u'stream id', u'sentence id', model_key])
with gzip.open(sal_tsv_path, u'w') as f:
sims_df.to_csv(f, sep='\t', index=False, index_label=False)
result_queue.put(None)
except Queue.Empty:
pass
class SaliencePredictionAggregator(object):
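    # Merges the per-model salience prediction TSVs for each hour into a
    # single gzipped TSV (one column per model) under
    # $TREC_DATA/salience-predictions-agg.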
def __init__(self):
self.dir_ = os.path.join(
os.getenv(u'TREC_DATA', u'.'), u'salience-predictions-agg')
if not os.path.exists(self.dir_):
os.makedirs(self.dir_)
def check_coverage(self, event, corpus, feature_set,
prefix, model_events, n_samples=10, **kwargs):
feats = get_resource_manager(u'SentenceFeaturesResource')
n_hours = 0
n_covered = 0
for hour in event.list_event_hours():
feats_tsv_path = feats.get_tsv_path(event, hour)
sal_tsv_path = self.get_tsv_path(event, hour, prefix, feature_set)
if os.path.exists(feats_tsv_path):
n_hours += 1
if os.path.exists(sal_tsv_path):
n_covered += 1
if n_hours == 0:
return 0
else:
return n_covered / float(n_hours)
def get(self, event, corpus, feature_set,
prefix, model_events, n_samples=10, **kwargs):
feats = get_resource_manager(u'SentenceFeaturesResource')
sm = SalienceModels()
sp = SaliencePredictions()
model_paths = []
for model_event in model_events:
model_paths.extend(
sm.get_model_paths(
model_event, feature_set, prefix, n_samples))
hours = event.list_event_hours()
n_hours = len(hours)
pb = ProgressBar(n_hours)
for hour in hours:
pb.update()
tsv_paths = \
[sp.get_tsv_path(event, hour, prefix, feature_set, model_path)
for model_path in model_paths]
tsv_paths = [path for path in tsv_paths if os.path.exists(path)]
if len(tsv_paths) == 0:
continue
data = []
for tsv_path in tsv_paths:
with gzip.open(tsv_path, u'r') as f:
df = pd.io.parsers.read_csv(
f, sep='\t', quoting=3, header=0)
df.set_index([u'stream id', u'sentence id'], inplace=True)
data.append(df)
df = pd.concat(data, axis=1)
agg_path = self.get_tsv_path(event, hour, prefix, feature_set)
agg_dir = os.path.dirname(agg_path)
if not os.path.exists(agg_dir):
os.makedirs(agg_dir)
df.columns=sorted(df.columns)
with gzip.open(agg_path, u'w') as f:
df.to_csv(f, sep='\t')
def get_tsv_dir(self, event, prefix, feature_set):
data_dir = os.path.join(self.dir_,
prefix + "." + feature_set.fs_name(),
event.fs_name())
return data_dir
def get_tsv_path(self, event, hour, prefix, feature_set):
data_dir = self.get_tsv_dir(event, prefix, feature_set)
return os.path.join(data_dir, u'{}.tsv.gz'.format(
hour.strftime(u'%Y-%m-%d-%H')))
def get_dataframe(self, event, hour, prefix, feature_set):
tsv = self.get_tsv_path(event, hour, prefix, feature_set)
if not os.path.exists(tsv):
return None
else:
with gzip.open(tsv, u'r') as f:
df = pd.io.parsers.read_csv(
f, sep='\t', quoting=3, header=0)
return df
| apache-2.0 |
ibackus/ICgen | isaac.py | 2 | 50087 | """
-----------------------------------------------
A collection of simple Python utility functions (many of them aimed at protoplanetary disk simulations) meant to be easily imported from Python
-----------------------------------------------
"""
import pynbody
SimArray = pynbody.array.SimArray
pb = pynbody
import copy
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.interpolate as interp
import warnings
import glob
import os
import datetime
import fnmatch
import logging
self_dir = os.path.dirname(os.path.realpath(__file__))
print os.path.realpath(__file__)
def snapshot_defaults(snapshot):
"""
    Applies various defaults to tipsy snapshots of protoplanetary disk
    simulations. These include:
      - converting to convenient units (positions and softening in au, masses
        in Msol, velocities in km/s, gas temperature in K)
      - calculating particle smoothing lengths from mass and rho (if available)
    The snapshot is changed in place.
"""
# Setup units
snapshot['pos'].convert_units('au')
snapshot['mass'].convert_units('Msol')
snapshot['vel'].convert_units('km s**-1')
snapshot['eps'].convert_units('au')
snapshot.g['temp'].convert_units('K')
# Calculate smoothing lengths
if ('rho' in snapshot.g.loadable_keys()) or ('rho' in snapshot.g.keys()):
snapshot.g['rho'].convert_units('Msol au**-3')
if ~(np.any(snapshot.g['rho'] == 0)):
snapshot.g['smooth'] = (snapshot.g['mass']/snapshot.g['rho'])**(1,3)
return
def units_from_param(param):
"""
Figures out the simulation units from a .param file
**ARGUMENTS**
param : str or param dict (see configparser)
Simulation .param file or param dict loaded by configparser
Can also be a list or numpy array of these in which case a list
of units dicts is returned
**RETURNS**
units : dict
A dictionary of the units used in the simulation, returned as
pynbody units
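    Example (a minimal usage sketch; 'snapshot.param' is a placeholder
    filename):
        param = isaac.configparser('snapshot.param', 'param')
        units = isaac.units_from_param(param)
        print units['l_unit'], units['m_unit'], units['t_unit']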
"""
# Define function to load the units from a given param
def _load_units(param):
# Load param if necessary
if isinstance(param, str):
param = configparser(param, 'param')
# Universal G
G = pynbody.units.G
# Load units
dKpcUnit = param['dKpcUnit']
dMsolUnit = param['dMsolUnit']
# Set up pynbody units
m_unit = pynbody.units.Unit('{0} Msol'.format(dMsolUnit))
l_unit = pynbody.units.Unit('{0} kpc'.format(dKpcUnit))
t_unit = (l_unit**3/(G*m_unit))**(1,2)
# Convert the time unit to something sensible
years = t_unit.in_units('yr')
t_unit = pynbody.units.Unit('{0} yr'.format(years))
# Return
outdict = {'l_unit':l_unit, 'm_unit':m_unit, 't_unit':t_unit}
return outdict
# Iterate over param if necessary
if isinstance(param, (list, np.ndarray)):
outlist = []
for par in param:
outlist.append(_load_units(par))
return outlist
else:
# Not iterable
return _load_units(param)
def kepler_pos(pos, vel, t, Mstar, order=10):
"""
    Estimate the position at a future time t, assuming an elliptical Keplerian
    orbit around a star of mass Mstar.
"""
G = SimArray(1.0, 'G')
mu = G*Mstar
r = np.sqrt((pos**2).sum())
v = np.sqrt((vel**2).sum())
# Calculate semi-major axis
a = mu*r/(2*mu - v*v*r)
a.convert_units(r.units)
# Calculate eccentricity vector
ecc = (v*v)*pos/mu - ((pos*vel).sum())*vel/mu - pos/r
ecc.convert_units('1')
# Calculate eccentricity
e = float(np.sqrt((ecc**2).sum()))
# Calculate initial eccentric anomaly
# x1 = a*e^2 + r.e
x1 = a*e**2 + (pos*ecc).sum()
# y1 = |r x e| * sign(r.v)
y1 = np.sqrt((np.cross(pos, ecc)**2).sum())
y1 *= (pos*vel).sum()/abs((pos*vel).sum())
E0 = np.arctan2(y1,x1)
# Calculate mean anomaly
M0 = E0 - e*np.sin(E0)
a3 = np.power(a,3)
M = (np.sqrt(mu/a3)*t).in_units('1') + M0
# Calculate eccentric anomaly
E = E0
for i in range(order):
E = M + e*np.sin(E)
# Calculate (x1, y1) (relative to center of ellipse, not focus)
x1 = (2*a - r) * np.cos(E)
y1 = (2*a - r) * np.sin(E)
# Transform to original coordinates
x1hat = ecc/np.sqrt((ecc**2).sum())
y1hat = np.cross(np.cross(pos, vel), ecc)
y1hat /= np.sqrt((y1hat**2).sum())
pos_f = (x1 - a*e)*x1hat + y1*y1hat
return pos_f
def findfiles(filefilter='*', basedir='.'):
"""
Recursively find files according to filefilter
** ARGUMENTS **
filefilter : str
Filter for finding files. ie, '*.jpg' or 'file.txt'
basedir : str
Base directory to search. Default is the current directory
** RETURNS **
files : list
A list of the full path to all files matching filefilter
"""
matches = []
for root, dirnames, filenames in os.walk(basedir):
for filename in fnmatch.filter(filenames, filefilter):
fname = os.path.join(root, filename)
fname = os.path.realpath(fname)
matches.append(fname)
return matches
def walltime(filename):
"""
Reads walltime information from a ChaNGa .log file.
** ARGUMENTS **
filename : str
Filename of the .log file to load
** RETURNS **
wall_per_step : array
Wall time per step in seconds
"""
log_file = np.genfromtxt(filename, comments='#', delimiter=' ')
wall_per_step = log_file[:,-1]
walltime_total = datetime.timedelta(seconds = wall_per_step.sum())
walltime_avg = datetime.timedelta(seconds = wall_per_step.mean())
print 'Total walltime: '
print str(walltime_total)
print 'Average walltime per step:'
print str(walltime_avg)
return wall_per_step
def load_acc(filename, param_name = None, low_mem = True):
"""
Loads accelerations from a ChaNGa acceleration file (.acc2), ignoring the
star particle.
IF param_name is None, a .param file is searched for, otherwise param_name
should be a string specifying a .param file name
IF no param_file is found, the defaults are used:
length unit: AU
mass unit : Msol
Setting low_mem=True decreases memory usage by about 2x but also increases
readtime by about 2x
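    Example (a minimal usage sketch; the .acc2 filename is a placeholder):
        acc = isaac.load_acc('snapshot.000100.acc2')
        print acc.units, acc.shape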
"""
if param_name is None:
prefix = filename.split('.')[0]
param_list = glob.glob('*' + prefix +'*param')
if len(param_list) > 0:
param_name = param_list[0]
elif len(glob.glob('*.param')) > 0:
param_name = glob.glob('*.param')[0]
else:
warnings.warn('Could not find .param file. Assuming default units')
if param_name is not None:
# If a param name is set or a param file has been found:
print 'Loading param file: {}'.format(param_name)
param = configparser(param_name, ftype='param')
else:
# Set the default parameters
param = {}
# Assume AU as length unit
param['dKpcUnit'] = pynbody.units.au.ratio('kpc')
# Assume mass units as Msol
param['dMsolUnit'] = 1.0
# Figure out units
G = pynbody.units.G
l_unit = param['dKpcUnit']*pynbody.units.kpc
m_unit = param['dMsolUnit']*pynbody.units.Msol
t_unit = ((l_unit**3) * G**-1 * m_unit**-1)**(1,2)
a_unit = l_unit * t_unit**-2
if low_mem:
acc_file = open(filename, 'r')
n_particles = int(acc_file.readline().strip())
acc = SimArray(np.zeros(3*n_particles, dtype=np.float32), a_unit)
for i, line in enumerate(acc_file):
acc[i] = np.float32(line.strip())
acc_file.close()
return acc.reshape([n_particles, 3], order='F')[0:-1]
else:
# Load acceleration file as numpy array
acc = np.genfromtxt(filename, skip_header=1, dtype=np.float32)
n_particles = len(acc)/3
# Reshape and make it a SimArray with proper units
acc = SimArray(acc.reshape([n_particles, 3], order='F'), a_unit)
return acc
def height(snapshot, bins=100, center_on_star=True):
"""
Calculates the characteristic height (h) of a flared disk as a function
of cylindrical radius (r).
** ARGUMENTS **
snapshot : TipsySnap
Simulation snapshot for a flared disk
bins : int or array_like
Specifies the bins to use. If int, specifies the number of bins. If
array_like, specifies the bin edges
center_on_star : bool
If true (DEFAULT), cylindrical r is calculated relative to the star
** RETURNS **
r_edges : SimArray
Radial bin edges used for calculating h. Length N+1
h : SimArray
Height as a function of r, calculated as the RMS of z over a bin.
Length N
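    Example (a minimal usage sketch; 'snapshot.std' is a placeholder filename):
        f = pynbody.load('snapshot.std')
        isaac.snapshot_defaults(f)
        r_edges, h = isaac.height(f, bins=50)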
"""
# Center on star
if center_on_star:
star_pos = snapshot.s['pos'].copy()
snapshot['pos'] -= star_pos
else:
star_pos = 0.0*snapshot.s['pos']
# Calculate height
r = snapshot.g['rxy']
z2 = snapshot.g['z']**2
r_edges, z2_mean, err = binned_mean(r, z2, bins=bins, ret_bin_edges=True)
h = np.sqrt(z2_mean)
# Add star_pos back to snapshot
snapshot['pos'] += star_pos
return r_edges, h
def sigma(snapshot, bins=100):
"""
Calculates surface density vs r (relative to the center of mass)
** ARGUMENTS **
snapshot : tipsy snapshot
bins : int, list, array...
Either the number of bins to use or the binedges to use
** RETURNS **
sigma : SimArray
Surface density as a function of r
r_bins : SimArray
Radial bin edges
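    Example (a minimal usage sketch; 'snapshot.std' is a placeholder filename):
        f = pynbody.load('snapshot.std')
        isaac.snapshot_defaults(f)
        sig, r_bins = isaac.sigma(f, bins=50)
        r_center = (r_bins[1:] + r_bins[0:-1])/2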
"""
# Begin by subtracting off the center of mass position
cm = (snapshot['mass'][:,None] * snapshot['pos']).sum()/(snapshot['mass'].sum())
snapshot['pos'] -= cm
r = snapshot.g['rxy']
# particle mass
m_gas = snapshot.gas['mass'][[0]]
N, r_bins = np.histogram(r, bins=bins)
r_bins = match_units(r_bins, r.units)[0]
r_center = (r_bins[1:] + r_bins[0:-1])/2
dr = r_bins[[1]] - r_bins[[0]]
sig = N*m_gas/(2*np.pi*r_center*dr)
# Add star position back to positions
snapshot['pos'] += cm
return sig, r_bins
def Q2(snapshot, molecular_mass = 2.0, bins=100, max_height=None):
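    """
    Variant of Q: calculates the Toomre Q for every particle (optionally
    keeping only particles within max_height scale heights of the midplane)
    and bins the result radially. Returns (r_edges, Q_binned).
    """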
# Physical constants
kB = SimArray([1.0],'k')
G = SimArray([1.0],'G')
# Load stuff froms snapshot
v = snapshot.g['vt']
r = snapshot.g['rxy']
z = snapshot.g['z']
T = snapshot.g['temp']
# Calculate sound speed for all particles
m = match_units(molecular_mass,'m_p')[0]
cs = np.sqrt(kB*T/m)
# Calculate surface density
sig_binned, r_edges = sigma(snapshot, bins)
r_cent = (r_edges[1:]+r_edges[0:-1])/2
sig_spl = extrap1d(r_cent, sig_binned)
sig = SimArray(sig_spl(r), sig_binned.units)
# Calculate omega (as a proxy for kappa)
omega = v/r
kappa = omega
#Calculate Q for all particles
print 'kappa',kappa.units
print 'cs',cs.units
print 'sigma', sig.units
Q_all = (kappa*cs/(np.pi*G*sig)).in_units('1')
# Use particles close to midplane
if max_height is not None:
dummy, h = height(snapshot, bins=r_edges)
ind = np.digitize(r, r_edges) - 1
ind[ind<0] = 0
ind[ind >= (len(r_edges)-1)] = len(r_edges)-2
mask = abs(z) < (max_height*h[ind])
Q_all = Q_all[mask]
r = r[mask]
dummy, Q_binned, dummy2 = binned_mean(r, Q_all, binedges=r_edges)
return r_edges, Q_binned
def kappa(f, bins=100):
"""
Estimate the epicyclic frequency from velocity
**ARGUMENTS**
f : TipsySnap
Simulation snapshot
bins : int or array-like
Either the number of bins to use or the bin edges
**RETURNS**
kappa : SimArray
epicyclic frequency
r_edges : SimArray
binedges used
"""
# Require regular spacing of bins
if not isinstance(bins, int):
dr = bins[[1]] - bins[[0]]
eps = np.finfo(bins.dtype).eps
if not np.all(bins[1:] - bins[0:-1] <= dr + 1000*eps):
raise ValueError, 'Bins not uniformly spaced'
r = f.g['rxy']
v = f.g['vt']
r_edges, v_mean, dummy = binned_mean(r, v, bins=bins, ret_bin_edges=True)
dummy, rv_mean, dummy2 = binned_mean(r, r*v, bins=r_edges)
r_cent = (r_edges[1:] + r_edges[0:-1])/2
dr = r_edges[[1]] - r_edges[[0]]
drv_dr = np.gradient(rv_mean, dr)
kappa = np.sqrt(2*v_mean*drv_dr)/r_cent
return kappa, r_edges
def Q(snapshot, molecular_mass = 2.0, bins=100, max_height=None, \
use_velocity=False, use_omega=True):
"""
Calculates the Toomre Q as a function of r, assuming radial temperature
profile and kappa ~= omega
** ARGUMENTS **
snapshot : tipsy snapshot
molecular_mass : float
Mean molecular mass (for sound speed). Default = 2.0
bins : int or array
Either the number of bins or the bin edges
use_velocity : Bool
Determines whether to use the particles' velocities to calculate orbital
velocity. Useful if the circular orbital velocities are set in the
snapshot.
use_omega : Bool
Default=True. Use omega as a proxy for kappa to reduce noise
** RETURNS **
Q : array
Toomre Q as a function of r
r_edges : array
Radial bin edges
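    The Toomre parameter is estimated per radial bin as
        Q = kappa * c_s / (pi * G * sigma)
    where kappa is approximated by omega when use_omega=True.
    Example (a minimal usage sketch; 'snapshot.std' is a placeholder filename):
        f = pynbody.load('snapshot.std')
        isaac.snapshot_defaults(f)
        Q_vals, r_edges = isaac.Q(f, molecular_mass=2.0, bins=50)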
"""
# Physical constants
kB = SimArray([1.0],'k')
G = SimArray([1.0],'G')
# Calculate surface density
sig, r_edges = sigma(snapshot, bins)
# Calculate sound speed
m = match_units(molecular_mass,'m_p')[0]
c_s_all = np.sqrt(kB*snapshot.g['temp']/m)
# Bin/average sound speed
dummy, c_s, dummy2 = binned_mean(snapshot.g['rxy'], c_s_all, binedges=r_edges)
if use_omega:
# Calculate keplerian angular velocity (as a proxy for the epicyclic
# frequency, which is a noisy calculation)
if use_velocity:
# Calculate directly from particle's velocity
dummy, omega, dummy2 = binned_mean(snapshot.g['rxy'], \
snapshot.g['vt']/snapshot.g['rxy'], binedges=r_edges)
else:
# Estimate, from forces, using pynbody
p = pynbody.analysis.profile.Profile(snapshot, bins=r_edges)
omega = p['omega']
kappa_calc = omega
else:
if use_velocity:
# Calculate directly from particle's velocities
kappa_calc, dummy = kappa(snapshot, r_edges)
else:
# Estimate, from forces, using pynbody
p = pynbody.analysis.profile.Profile(snapshot, bins=r_edges)
kappa_calc = p['kappa']
return (kappa_calc*c_s/(np.pi*G*sig)).in_units('1'), r_edges
def Q_eff(snapshot, molecular_mass=2.0, bins=100):
"""
Calculates the effective Toomre Q as a function of r, assuming radial temp
profile and kappa ~= omega and scaleheight << wavelength. This assumption
simplifies the calculation of Q_eff (where wavelength is the wavelength of
the disturbances of interest)
** ARGUMENTS **
snapshot : tipsy snapshot
molecular_mass : float
Mean molecular mass (for sound speed). Default = 2.0
bins : int or array
Either the number of bins or the bin edges
** RETURNS **
Qeff : array
Effective Toomre Q as a function of r for scale height << wavelength
r_edges : array
Radial bin edges
"""
# Physical constants
kB = SimArray([1.0],'k')
G = SimArray([1.0],'G')
# Calculate surface density
sig, r_edges = sigma(snapshot, bins)
# Calculate keplerian angular velocity (as a proxy for the epicyclic
# frequency, which is a noisy calculation)
p = pynbody.analysis.profile.Profile(snapshot, bins=r_edges)
omega = p['omega']
# Calculate sound speed
m = match_units(molecular_mass,'m_p')[0]
c_s_all = np.sqrt(kB*snapshot.g['temp']/m)
# Bin/average sound speed
dummy, c_s, dummy2 = binned_mean(snapshot.g['rxy'], c_s_all, binedges=r_edges)
# Calculate scale height
dummy, h = height(snapshot, bins=r_edges, center_on_star=False)
a = np.pi*G*sig
b = (2*a*h/c_s**2).in_units('1')
Q0 = (omega*c_s/a).in_units('1')
    return Q0 * np.sqrt(1 + b), r_edges
def strip_units(x):
"""
Removes the units from a SimArray and returns as a numpy array. Note
that x is copied so that it is not destroyed
x can be a single SimArray or a tuple or list of SimArrays
If any of the inputs are single number, they are returned as a number
USAGE:
array = strip_units(SimArray)
array1, array2, ... = strip_units([SimArray1, SimArray2, ...])
"""
if isinstance(x, (tuple,list)):
# loop through and assign output
x_out = []
for x_i in x:
if np.prod(x_i.shape) == 1:
# There is only one element in x_i. Make sure to return it as
# a number (not an array)
if np.sum(x_i.shape) == 0:
# This is a zero dimensional SimArray
x_out.append(x_i.tolist())
else:
# This is 1 dimensional SimArray
x_out.append(x_i[0])
else:
#This is a multi-element SimArray
x_out.append(np.asarray(x_i.tolist()))
else:
if np.prod(x.shape) == 1:
# There is only one element in x_i. Return as a number
if np.sum(x.shape) == 0:
# This is a 0 dimensional SimArray
x_out = x.tolist()
else:
# This a 1 dimensional SimArray
x_out = x[0]
else:
x_out = np.asarray(x.tolist())
return x_out
def set_units(x, units):
"""
Sets the units of x to units. If x has units, they are ignored.
Does not destroy/alter x
USAGE:
SimArray = set_units(x, units)
SimArray1, SimArray2, ... = set_units([x1, x2, ...], units)
SimArray1, SimArray2, ... = set_units([x1, x2, ...], [units1, units2, ...])
"""
if isinstance(x, (tuple,list)):
x_out = []
if not isinstance(units, (tuple, list)):
units = [units]*len(x)
for i in range(len(x)):
x_i = x[i]
if pynbody.units.has_units(x_i):
x_i_array = strip_units(x_i)
x_out.append(SimArray(x_i_array, units[i]))
else:
x_out.append(SimArray(x_i, units[i]))
else:
if pynbody.units.has_units(x):
x_array = strip_units(x)
x_out = SimArray(x_array, units)
else:
x_out = SimArray(x, units)
return x_out
def setup_param(param, snapshot=None, r_orb=1.0, n_orb=10.0, n_image=None, n_snap=100, \
n_check=None):
"""
Sets up the following for a .param file:
nSteps
dDumpFrameStep
iOutInterval
iCheckInterval
**ARGUMENTS**
param : str or param_dict (see isaac.configparser, configsave)
parameter file for the simulation, must already have dDelta and units
set properly
IF a str, assumed to be a filename
snapshot : str or TipsySnap(see pynbody) or None
Snapshot for the simulation. Needed to estimate the outer orbital
period.
IF a str, assumed to be a filename
IF None, the file pointed to by param is used
r_orb : float
radius to calculate the outer orbital period at as a fraction of the
radius of the farthest out particle. Must be between 0 and 1
n_orb : float
number of outer orbital periods to run simulation for
n_image : int or None
Total number of frames to dump (ie, dDumpFrameStep)
If None, defaults to n_snap
n_snap : int
Total number of simulation outputs
n_check : int or None
Total number of simulation checkpoints. If None, defaults to n_snap
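    Example (a minimal usage sketch; the filenames are placeholders):
        param = isaac.setup_param('snapshot.param', 'snapshot.std', n_orb=20)
        isaac.configsave(param, 'snapshot.param', ftype='param')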
"""
if (r_orb > 1) | (r_orb < 0):
raise ValueError, 'r_orb must be between 0 and 1'
if isinstance(snapshot, str):
# A filename has been passed, not a tipsy snapshot
snapshot = pynbody.load(snapshot)
if isinstance(param, str):
# A filename has been passed. Load the dictionary
param = configparser(param, 'param')
else:
# Copy so as to not overwrite the input dict
param = copy.deepcopy(param)
R_max = r_orb * snapshot.g['rxy'].max()
M_star = snapshot.s['mass']
# Read in .param stuff
l_unit = '{} kpc'.format(param['dKpcUnit'])
    m_unit = '{} Msol'.format(param['dMsolUnit'])
# Outer radius and star mass in simulation units
r = float(R_max.in_units(l_unit))
M = float(M_star.in_units(m_unit))
# Calculate the number of time steps to use
dt = param['dDelta']
period = 2*np.pi*np.sqrt(r**3/M)
N = int(np.round(n_orb * period/dt))
param['nSteps'] = N
# Calculate how often to output snapshots, frames, checkpoints
if n_check is None:
n_check = n_snap
if n_image is None:
n_image = n_snap
param['dDumpFrameStep'] = int(N/n_image)
param['iOutInterval'] = int(N/n_snap)
param['iCheckInterval'] = int(N/n_check)
return param
def make_submission_script(param_name, directory=None, nodes=1, walltime=12, changa='ChaNGa_uw_mpi', jobname='changasim', scriptname='subber.sh', backfill=True):
"""
    Creates a PBS submission script for qsub. Note that this is highly
    platform dependent.
"""
# Set up simulation directory
if directory is None:
directory = os.getcwd()
# Load param file
param = configparser(param_name, 'param')
fprefix = param['achOutName']
# Format walltime for qsub
seconds = int(walltime*3600)
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
walltime_str = '{0:02d}:{1:02d}:{2:02d}'.format(h,m,s)
# Format walltime for changa
walltime_min = int(walltime*60)
# Open submission script
subber = open(scriptname,'w')
# Write to submission script
subber.write('#!/bin/bash\n\
#PBS -N {0}\n\
#PBS -j oe\n\
#PBS -m be\n\
#PBS -M [email protected]\n\
#PBS -l nodes={1}:ppn=12,feature=12core\n\
#PBS -l walltime={2}\n\
#PBS -V\n'.format(jobname, nodes, walltime_str))
if backfill:
subber.write('#PBS -q bf\n')
subber.write('module load gcc_4.4.7-ompi_1.6.5\n')
subber.write('export MX_RCACHE=0\n')
subber.write('workdir={0}\n'.format(directory))
subber.write('cd $workdir\n')
subber.write('changbin=$(which {0})\n'.format(changa))
subber.write('if [ -e "lastcheckpoint" ]\n\
then\n\
echo "lastcheckpoint exists -- restarting simulation..."\n\
last=`cat lastcheckpoint`\n\
mpirun --mca mtl mx --mca pml cm $changbin +restart {0}.chk$last +balancer MultistepLB_notopo -wall {2} $workdir/{1} >> $workdir/{0}.out 2>&1\n\
else\n\
echo "lastcheckpoint doesnt exist -- starting new simulation..."\n\
mpirun --mca mtl mx --mca pml cm $changbin -D 3 +consph +balancer MultistepLB_notopo -wall {2} $workdir/{1} >& $workdir/{0}.out\n\
fi\n\
'.format(fprefix, param_name, walltime_min))
subber.close()
# Make submission script executable
os.system('chmod a+rwx {}'.format(scriptname))
def make_param(snapshot, filename=None):
"""
Generates a default param dictionary. Can be saved using isaac.configsave
EXAMPLE
snapshot = pynbody.load('snapshot.std') # Load snapshot
param_dict = isaac.make_param(snapshot) # Make default param dict
isaac.configsave(param_dict, 'snapshot.param', ftype='param') # Save
Optionally, the user can set the snapshot filename manually
"""
fname_def = os.path.join(self_dir, 'default.param')
param = configparser(fname_def, ftype='param')
if filename is not None:
param['achInFile'] = filename
param['achOutName'] = os.path.splitext(filename)[0]
elif snapshot.filename != '<created>':
param['achInFile'] = snapshot.filename
param['achOutName'] = os.path.splitext(snapshot.filename)[0]
# Set up the length units
param['dKpcUnit'] = snapshot['pos'].units.ratio('kpc')
# Set up the mass units
param['dMsolUnit'] = snapshot['mass'].units.ratio('Msol')
# Set the mean molecular mass
param['dMeanMolWeight'] = snapshot.gas['mu'][0]
return param
def make_director(sigma_min, sigma_max, r, resolution=1200, filename='snapshot'):
"""
Makes a director dictionary for ChaNGa runs based on the min/max surface
density, maximum image radius, and image resolution for a gaseous
protoplanetary disk. The created dictionary can be saved with
isaac.configsave
The method is to use an example director file (saved as default.director)
which works for one simulation and scale the various parameters accordingly.
default.director should have a commented line in it which reads:
#sigma_max float
where float is the maximum surface density of the simulation in simulation
units.
**ARGUMENTS**
sigma_min : float
The surface density that corresponds to 0 density on the image (ie the
minimum threshold). Required for setting the dynamic range
sigma_max : float
Maximum surface density in the simulation
r : float
Maximum radius to plot out to
resolution : int or float
Number of pixels in image. The image is shape (resolution, resolution)
filename : str
prefix to use for saving the images. Example: if filename='snapshot',
then the outputs will be of form 'snapshot.000000000.ppm'
**RETURNS**
director : dict
A .director dictionary. Can be saved with isaac.configsave
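    Example (a minimal usage sketch; the surface density values and radius
    are placeholders):
        director = isaac.make_director(1e-8, 1e-2, 50.0, resolution=768,
                                       filename='snapshot')
        isaac.configsave(director, 'snapshot.director', ftype='director')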
"""
# -----------------------------------------------------------
# Parse defaults to get scale factor for c
# -----------------------------------------------------------
defaults = configparser(os.path.join(self_dir, 'default.director'))
if '#sigma_max' not in defaults:
raise KeyError,'Default .director file should have a line e.g. << #sigma_max 0.01 >>'
sigma_max0 = defaults['#sigma_max']
c0 = defaults['colgas'][3]
n0 = defaults['size'][0]
r0 = defaults['eye'][2]
A = (c0 * float(n0)**2)/(sigma_max0 * r0**2)
# -----------------------------------------------------------
# Create new director dictionary
# -----------------------------------------------------------
director = copy.deepcopy(defaults)
director.pop('#sigma_max', None)
logscale_min = sigma_min/sigma_max
if pynbody.units.has_units(logscale_min):
logscale_min = float(logscale_min.in_units('1'))
c = A * float(sigma_max * r**2 /float(resolution)**2)
director['colgas'][3] = c
director['size'] = [resolution, resolution]
director['eye'][2] = r
director['file'] = filename
return director
def match_units(x, y):
"""
Matches the units of x to y and returns x and y in the same units.
IF x and y don't have units, they are unchanged
IF one of x or y has units, the unitless quantity is returned as a
SimArray (see pynbody.array.SimArray) with the units of the other quantity.
IF both have units, then an attempt is made to convert x into the units of
y. If this is not possible, an error is raised, for example if x is in
units of 'au' and y is in units of 'Msol'
x, y can be: scalar, array, SimArray, pynbody unit (eg pynbody.units.G),
or a unit string (eg 'Msol a**-2')
*** RETURNS ***
x, y are returned as a tuple
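    Examples (minimal sketches):
        x, y = isaac.match_units(SimArray(1.0, 'au'), 'km')  # x converted to km
        x, y = isaac.match_units(np.arange(3), 'Msol')  # x gains units of Msol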
"""
# ----------------------------------------------
# Check if either is a string
# ----------------------------------------------
if isinstance(x, str):
x = SimArray(1.0, x)
if isinstance(y,str):
y = SimArray(1.0, y)
# ----------------------------------------------
# Check if one is a pynbody unit
# ----------------------------------------------
# If one is a named unit (eg pynbody.units.G), convert to SimArray
if isinstance(x, pynbody.units.UnitBase):
x = SimArray(1.0, x)
if isinstance(y, pynbody.units.UnitBase):
y = SimArray(1.0, y)
# ----------------------------------------------
# Check the units
# ----------------------------------------------
# If both have units, try to convert x to the units of y
if (pynbody.units.has_units(x)) & (pynbody.units.has_units(y)):
x_out = (x.in_units(y.units))
y_out = y
# If only x has units, make y a SimArray with the units of x
elif (pynbody.units.has_units(x)):
y_out = SimArray(y, x.units)
x_out = x
# If only y has units, make x a SimArray with the units of y
elif (pynbody.units.has_units(y)):
x_out = SimArray(x, y.units)
y_out = y
# Otherwise, neither has units
else:
x_out = x
y_out = y
# Try to copy so that changing x_out, y_out will not change x,y
try:
x_out = x_out.copy()
except AttributeError:
pass
try:
y_out = y_out.copy()
except AttributeError:
pass
return x_out, y_out
def digitize_threshold(x, min_per_bin = 0, bins=10):
"""
Digitizes x according to bins, similar to numpy.digitize, but requires
that there are at least min_per_bin entries in each bin. Bins that do not
have enough entries are combined with adjacent bins until they meet the
requirement.
**ARGUMENTS**
x : array_like
Input array to be binned. Must be 1-dimensional
min_per_bin : int
Minimum number of entries per bin. Default = 0
bins : int or sequence of scalars, optional
[same as for np.histogram]
If bins is an int, it defines the number of equal-width bins in the
given range (10, by default). If bins is a sequence, it defines the
bin edges, including the rightmost edge, allowing for non-uniform bin
widths.
**RETURNS**
A tuple containing:
ind : array_like
Indices of the bin each element of x falls into, such that:
bin_edges[i] <= x[i] < bin_edges[i+1]
(See np.digitize, this uses the same convention)
bin_edges: array_like
The edges of the bins
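    Example (a minimal sketch using random data):
        x = np.random.rand(1000)
        ind, bin_edges = isaac.digitize_threshold(x, min_per_bin=50, bins=20)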
"""
# Find number in each bin
N, bin_edges = np.histogram(x, bins)
if N.sum() < min_per_bin:
raise RuntimeError,'Not enough particles within the bin range'
n_bins = len(bin_edges) - 1
# Find out which binedges to delete
edge_mask = np.ones(len(bin_edges), dtype='bool')
for i in range(n_bins - 1):
# Work forwards
if N[i] < min_per_bin:
# Set mask to not use the right bin edge
edge_mask[i+1] = False
# Combine the particles in current and next bin
N[i] += N[i+1]
N[i+1] = N[i]
bin_mask = edge_mask[1:]
N = N[bin_mask]
bin_edges = bin_edges[edge_mask]
edge_mask = np.ones(len(bin_edges), dtype='bool')
n_bins = len(bin_edges) - 1
for i in range(n_bins-1, 0, -1):
# Work backwards
if N[i] < min_per_bin:
# Set mask to not use the left bin edge
edge_mask[i] = False
# Combine the particles in current and next bin
N[i] += N[i-1]
N[i-1] = N[i]
bin_edges = bin_edges[edge_mask]
ind = np.digitize(x, bin_edges)
return ind, bin_edges
def binned_mean(x, y, bins=10, nbins=None, binedges = None, weights=None,\
weighted_bins=False, ret_bin_edges=False):
"""
Bins y according to x and takes the average for each bin.
bins can either be an integer (the number of bins to use) or an array of
binedges. bins will be overridden by nbins or binedges
Optionally (for compatibility reasons) if binedges is specified, the
x-bins are defined by binedges. Otherwise the x-bins are determined by
nbins
If weights = None, equal weights are assumed for the average, otherwise
weights for each data point should be specified
y_err (error in y) is calculated as the standard deviation in y for each
bin, divided by sqrt(N), where N is the number of counts in each bin
IF weighted_bins is True, the bin centers are calculated as a center of
mass
NaNs are ignored for the input. Empty bins are returned with nans
RETURNS a tuple of (bin_centers, y_mean, y_err) if ret_bin_edges=False
else, Returns (bin_edges, y_mean, y_err)
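    Example (a minimal sketch using random data):
        x = np.random.rand(1000)
        y = x**2 + 0.1*np.random.randn(1000)
        bin_centers, y_mean, y_err = isaac.binned_mean(x, y, bins=20)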
"""
if (isinstance(bins, int)) and (nbins is None):
nbins = bins
elif (hasattr(bins, '__iter__')) and (binedges is None):
binedges = bins
if binedges is not None:
nbins = len(binedges) - 1
else:
binedges = np.linspace(x.min(), (1 + np.spacing(2))*x.max(), nbins + 1)
if weights is None:
weights = np.ones(x.shape)
weights = strip_units(weights)
# Pre-factor for weighted STD:
A = 1/(1 - (weights**2).sum())
# Initialize
y_mean = np.zeros(nbins)
y_std = np.zeros(nbins)
# Find the index bins for each data point
ind = np.digitize(x, binedges) - 1
# Ignore nans
nan_ind = np.isnan(y)
N = np.histogram(x, binedges)[0]
# Initialize bin_centers (try to retain units)
bin_centers = 0.0*binedges[1:]
for i in range(nbins):
#Indices to use
mask = (ind==i) & (~nan_ind)
# Set up the weighting
w = weights[mask].copy()
w /= w.sum()
A = 1/(1 - (w**2).sum())
#y_mean[i] = np.nanmean(y[mask])
y_mean[i] = (w * y[mask]).sum()
var = A*(w*(y[mask] - y_mean[i])**2).sum()
y_std[i] = np.sqrt(var)
#y_std[i] = np.std(y[use_ind])
if weighted_bins:
# Center of mass of x positions
bin_centers[i] = (w*x[mask]).sum()
y_mean = match_units(y_mean, y)[0]
y_err = y_std/np.sqrt(N)
y_err = match_units(y_err, y)[0]
y_mean[N==0] = np.nan
y_err[N==0] = np.nan
if not weighted_bins:
bin_centers = (binedges[0:-1] + binedges[1:])/2.0
binedges = match_units(binedges, x)[0]
bin_centers = match_units(bin_centers, x)[0]
else:
bin_centers[N==0] = np.nan
if ret_bin_edges:
return binedges, y_mean, y_err
else:
return bin_centers, y_mean, y_err
def heatmap(x, y, z, bins=10, plot=True, output=False):
"""
Creates a pcolor heatmap for z evaluated at (x,y). z is binned and
averaged according to x and y. x, y, and z should be 1-D arrays with the
same length.
IF bins = N, a pcolor plot of shape (N,N) is returned
IF bins = (M,N) [a tuple], a pcolor plot of shape (M,N) is returned
IF plot = True (default) a plot is created.
*** RETURNS ***
IF output = False, nothing is returned (default)
IF output = True:
Returns x_mesh, y_mesh, z_binned
    x_mesh, y_mesh are the meshgrid of x, y bin edges over which z is
    evaluated. z_binned is the average of z for each bin.
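    Example (a minimal sketch using random data):
        x = np.random.rand(1000)
        y = np.random.rand(1000)
        z = x*y
        x_mesh, y_mesh, z_binned = isaac.heatmap(x, y, z, bins=20, output=True)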
"""
N, x_binedges, y_binedges = np.histogram2d(x, y, bins = bins)
x_ind = np.digitize(x, x_binedges) - 1
y_ind = np.digitize(y, y_binedges) - 1
nx_bins = len(x_binedges) - 1
ny_bins = len(y_binedges) - 1
z_binned = np.zeros([nx_bins, ny_bins])
for i in range(nx_bins):
for j in range(ny_bins):
z_binned[i,j] = z[(x_ind==i) & (y_ind==j)].mean()
x_mesh, y_mesh = np.meshgrid(x_binedges, y_binedges, indexing = 'ij')
if plot:
cmap = copy.copy(matplotlib.cm.jet)
cmap.set_bad('w',1.)
masked_z = np.ma.array(z_binned, mask=np.isnan(z_binned))
plt.pcolormesh(x_mesh, y_mesh, masked_z, cmap = cmap)
plt.colorbar()
if output:
return x_mesh, y_mesh, z_binned
def configparser(fname,ftype='auto'):
"""
--------------------------------------------------
parameters = configparser(fname,ftype='auto')
Tries to parse ChaNGa configuration files
ftype can be 'auto', 'param', or 'director'. If auto, config parser will
try to determine the filetype on its own.
returns:
dictionary 'parameters'. The keys are the names of the parameters and
the values are the values defined in the file fname
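    Example (a minimal usage sketch; 'snapshot.param' is a placeholder
    filename and dDelta is one typical key):
        param = isaac.configparser('snapshot.param', ftype='param')
        print param['dDelta']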
--------------------------------------------------
"""
types = np.array(['param','director'])
ftype = ftype.lower()
param = {}
if ftype == 'auto':
# Try using extension to determine file type
a = fname.split('.')
ftype = a[-1].lower()
if np.sum(types == ftype) == 0:
# Could not find file type
print ('Could not determine config filetype...exiting')
return param
# Try to determine filetype
# --------------------------------------------------
# Parse param file
# --------------------------------------------------
if ftype == 'param':
farray = np.genfromtxt(fname,delimiter='=',dtype=None)
for n in range(len(farray)):
param[farray[n,0].strip()] = str2num(farray[n,1].strip())
# --------------------------------------------------
# Parse director file
# --------------------------------------------------
elif ftype == 'director':
f = open(fname,'r')
f.seek(0)
dummy = 0
for line in f:
a = line.strip().split()
if len(a) == 1:
# we're dealing with a flag
param[a[0]] = str2num(a[0])
elif len(a) > 1:
param[a[0]] = str2num(a[1:])
else:
# This is an empty line
pass
f.close()
# --------------------------------------------------
# Throw warning, return 'param' as empty
# --------------------------------------------------
else:
warnings.warn('Still cannot determine filetype.')
return param
def configsave(param,filename,ftype='auto'):
"""
--------------------------------------------------
Saves parameters defined by param (see configparser) to filename.
Possible ftypes are 'director' and 'param'. If set to auto, configsave
tries to guess file type from the extension.
--------------------------------------------------
"""
f = open(filename,'w')
types = np.array(['param','director'])
ftype = ftype.lower()
if ftype == 'auto':
# Try to figure out filetype
a = filename.split('.')
ftype = a[-1].lower()
if ftype == 'param':
pars = sorted(param.iteritems())
for n in range(len(pars)):
f.write('{0:25s}= {1}\n'.format(pars[n][0],pars[n][1]))
elif ftype == 'director':
values = param.values()
keys = param.keys()
for n in range(len(keys)):
outstr = keys[n]
if outstr == values[n]:
# We just have a flag
pass
elif isinstance(values[n],(float,int,str)):
outstr = outstr + ' {0}'.format(values[n])
else:
outstr = outstr + ' ' + ' '.join(map(str,values[n]))
f.write('{0}\n'.format(outstr))
else:
#no file type
warnings.warn('no such filetype {0}\nCould not save'.format(ftype))
f.close()
def extrap1d(x,y):
"""
Calculates a linear interpolation of x and y and does a linear
extrapolation for points outside of x and y.
Uses scipy.interpolate.interp1d
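    Example (a minimal sketch):
        x = np.linspace(0, 1, 10)
        y = 2*x + 1
        fcn = isaac.extrap1d(x, y)
        fcn(1.5)   # linearly extrapolated beyond x.max()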
"""
# Ignore nans
ind = (~np.isnan(x)) & (~np.isnan(y))
x = x[ind]
y = y[ind]
# calculate interpolation
yspline = interp.interp1d(x,y,kind='linear')
def fcn(x0):
if hasattr(x0,'__iter__'):
mask1 = x0 < x.min()
mask2 = x0 > x.max()
out = np.zeros(len(x0))
out[mask1] = y[0] + (x0[mask1] - x[0])*(y[1]-y[0])/(x[1]-x[0])
out[mask2] = y[-1] + (x0[mask2] - x[-1])*(y[-1] - y[-2])/(x[-1] - x[-2])
mask3 = (~mask1) & (~mask2)
out[mask3] = yspline(x0[mask3])
else:
if x0 < x.min():
out = y[0] + (x0 - x[0])*(y[1]-y[0])/(x[1]-x[0])
elif x0 > x.max():
out = y[-1] + (x0 - x[-1])*(y[-1] - y[-2])/(x[-1] - x[-2])
else:
out = yspline(x0)
# Don't return an array with one element
out = float(out)
return out
return fcn
def smoothstep(x,degree=5,rescale=False):
"""
Calculates a smooth step function y(x) evaluated at the data points x.
x should be a numpy array or float.
y(x) is a polynomial of order 'degree' (default is 5). degree must be an
odd number between 3 and 25 (inclusive). The higher the order, the
sharper the step is.
y(x) is defined by:
y(0) = 0
y(1) = 1
The first (degree - 1)/2 derivatives are 0 at y = 0,1
*** ARGUMENTS ***
* x * Points at which to evaluate the smoothstep
* degree * Degree of the smooth step. Must be odd number between 3 and 25
default = 5
* rescale * Rescale x to be between 0 and 1. Default = False. If True,
x MUST be an array (greater than length 1)
    *** RETURNS ***
    y : float or numpy array
        The smooth step function evaluated at the points x
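    Example (a minimal sketch):
        x = np.linspace(-0.5, 1.5, 100)
        y = isaac.smoothstep(x, degree=5)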
"""
# -----------------------------------------------------------
# Load up the hermite spline (polynomial) coefficients
# -----------------------------------------------------------
fname = os.path.join(self_dir,'hermite_spline_coeffs.dat')
f =open(fname,'r')
coeffs_list = []
order_list = []
for line in f:
l = line.strip().split(',')
order_list.append(int(l[0]))
for n in range(len(l)):
l[n] = float(l[n].strip())
coeffs_list.append(np.array(l[1:],dtype='float'))
order = np.array(order_list)
coeffs = coeffs_list[(order==degree).argmax()]
# -----------------------------------------------------------
# Calculate the smooth step function y(x)
# -----------------------------------------------------------
n_coeffs = len(coeffs)
if rescale:
try:
x = (x - x.min())/(x.max() - x.min())
except:
raise RuntimeError,'Could not rescale x. Make sure x is an array'
if isinstance(x, (int, long, float, complex)):
# x is a number, handle accordingly
y = 0.0
if (x > 0) & (x < 1):
# If 0<x<1, calculate the smooth step
for n in range(n_coeffs):
y += coeffs[n] * x**(degree - n)
elif x <= 0:
y = 0.0
else:
y = 1.0
else:
# Assume x is a numpy array
y = np.zeros(x.shape)
ind = (x > 0) & (x < 1)
for n in range(n_coeffs):
y[ind] += coeffs[n] * x[ind]**(degree-n)
y[x >= 1] = 1
return y
def str2num(string):
"""
--------------------------------------------------
Tries to see if 'string' is a number
If 'string' is a string, returns:
int(string) for integers
float(string) for floats
'string' otherwise
If 'string' is a float or an integer, returns:
string
If none of the above, treats it like a list or tuple
and returns for each entry of 'string' a float,int,
or str as required. Returns as a list
--------------------------------------------------
"""
if isinstance(string,int):
output = string
elif isinstance(string,float):
output = string
elif not isinstance(string,str):
output = []
for a in string:
try:
output.append(int(a))
except:
try:
output.append(float(a))
except:
output.append(a)
if len(output) == 1:
output = output[0]
else:
output = string
try:
output = int(string)
except:
try:
output = float(string)
except:
pass
return output
def loadhalos(fname=''):
"""
Load halo (.grp) file generated from fof
Should be an ascii list of numbers, where the first row contains the
total number of particles (gas+star+dark) and the remaining rows define
which halo each particle belongs to
"""
if fname == '':
# Empty filename
pass
grp = np.loadtxt(fname,dtype=np.uint16)
grp = grp[1:] # (ignore the number of particles)
return grp
def fof(fFilter,saveDir='',minMembers=8,linklen=0.01):
"""
--------------------------------------------------
A simple script that allows you to loop through calls to fof
for many files in one directory
--------------------------------------------------
"""
flist = np.sort(glob.glob(fFilter))
nfiles = len(flist)
if (saveDir != '') and nfiles > 0:
        if not os.path.isdir(saveDir):
os.makedirs(saveDir)
for n in range(nfiles):
fname = flist[n]
outname = os.path.join(saveDir,fname)
os.system('totipnat < {0} | fof -g -m {1} -e {2} -o {3}'.format(fname,minMembers,linklen,outname))
def pbverbosity(cmd=None):
"""
Changes and returns pynbody verbosity. Works for different versions
of pynbody.
**ARGUMENTS**
cmd
-If None (default) current verbosity level is returned, nothing is done
-If 'off', pynbody is silenced
-If 'on', pynbody verbosity is set on
-If something else, cmd is assumed to be a verbosity level
**RETURNS**
current_verbosity
pynbody verbosity level before any changes were made
**EXAMPLES**
*Toggle pynbody verbosity*
current_verbosity = pbverbosity('off')
...
do stuff
...
pbverbosity(current_verbosity)
"""
# -----------------------------
# Get current verbosity level
# -----------------------------
if hasattr(pb, 'logger'):
# As of v0.30, pynbody uses python's logging to handle verbosity
logger = True
current_verbosity = pb.logger.getEffectiveLevel()
pb.logger.setLevel(logging.ERROR)
else:
# For pynbody version < 0.3, verbosity is handled in the config
logger = False
current_verbosity = pb.config['verbose']
# -----------------------------
# Change verbosity
# -----------------------------
if cmd is None:
# Don't change verbosity. just return the current verbosity
pass
elif cmd == 'off':
# Toggle verbosity off
if logger:
pb.logger.setLevel(logging.ERROR)
else:
pb.config['verbose'] = False
elif cmd == 'on':
# Toggle verbosity on
if logger:
pb.logger.setLevel(logging.DEBUG)
else:
pb.config['verbose'] = True
else:
# Set verbosity to the verbosity level specified by cmd
if logger:
pb.logger.setLevel(cmd)
else:
pb.config['verbose'] = cmd
# Return the verbosity level before any changes were made
return current_verbosity
| mit |
zuku1985/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
ie. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
shaneknapp/spark | python/pyspark/pandas/tests/plot/test_series_plot.py | 15 | 4133 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import pandas as pd
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.plot import PandasOnSparkPlotAccessor, BoxPlotBase
from pyspark.testing.pandasutils import have_plotly, plotly_requirement_message
class SeriesPlotTest(unittest.TestCase):
@property
def pdf1(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50]}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10]
)
@property
def psdf1(self):
return ps.from_pandas(self.pdf1)
@unittest.skipIf(not have_plotly, plotly_requirement_message)
def test_plot_backends(self):
plot_backend = "plotly"
with ps.option_context("plotting.backend", plot_backend):
self.assertEqual(ps.options.plotting.backend, plot_backend)
module = PandasOnSparkPlotAccessor._get_plot_backend(plot_backend)
self.assertEqual(module.__name__, "pyspark.pandas.plot.plotly")
def test_plot_backends_incorrect(self):
fake_plot_backend = "none_plotting_module"
with ps.option_context("plotting.backend", fake_plot_backend):
self.assertEqual(ps.options.plotting.backend, fake_plot_backend)
with self.assertRaises(ValueError):
PandasOnSparkPlotAccessor._get_plot_backend(fake_plot_backend)
def test_box_summary(self):
def check_box_summary(psdf, pdf):
k = 1.5
stats, fences = BoxPlotBase.compute_stats(psdf["a"], "a", whis=k, precision=0.01)
outliers = BoxPlotBase.outliers(psdf["a"], "a", *fences)
whiskers = BoxPlotBase.calc_whiskers("a", outliers)
fliers = BoxPlotBase.get_fliers("a", outliers, whiskers[0])
expected_mean = pdf["a"].mean()
expected_median = pdf["a"].median()
expected_q1 = np.percentile(pdf["a"], 25)
expected_q3 = np.percentile(pdf["a"], 75)
iqr = expected_q3 - expected_q1
expected_fences = (expected_q1 - k * iqr, expected_q3 + k * iqr)
pdf["outlier"] = ~pdf["a"].between(fences[0], fences[1])
expected_whiskers = (
pdf.query("not outlier")["a"].min(),
pdf.query("not outlier")["a"].max(),
)
expected_fliers = pdf.query("outlier")["a"].values
self.assertEqual(expected_mean, stats["mean"])
self.assertEqual(expected_median, stats["med"])
self.assertEqual(expected_q1, stats["q1"] + 0.5)
self.assertEqual(expected_q3, stats["q3"] - 0.5)
self.assertEqual(expected_fences[0], fences[0] + 2.0)
self.assertEqual(expected_fences[1], fences[1] - 2.0)
self.assertEqual(expected_whiskers[0], whiskers[0])
self.assertEqual(expected_whiskers[1], whiskers[1])
self.assertEqual(expected_fliers, fliers)
check_box_summary(self.psdf1, self.pdf1)
check_box_summary(-self.psdf1, -self.pdf1)
if __name__ == "__main__":
from pyspark.pandas.tests.plot.test_series_plot import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
sealhuang/brainCodingToolbox | braincode/prf/vim2_plsreg.py | 3 | 11392 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import numpy as np
from scipy import ndimage
from scipy.misc import imsave
from sklearn.cross_decomposition import PLSCanonical, PLSRegression
from sklearn.externals import joblib
from braincode.util import configParser
from braincode.math import corr2_coef
from braincode.math import get_pls_components, pls_regression_predict
from braincode.pipeline import retinotopy
from braincode.prf import util as vutil
from braincode.prf import dataio
def check_path(dir_path):
"""Check whether the directory does exist, if not, create it."""
if not os.path.exists(dir_path):
os.mkdir(dir_path, 0755)
def retinotopic_mapping(corr_file, data_dir, vxl_idx=None, figout=False):
"""Make the retinotopic mapping using activation map from CNN."""
if figout:
fig_dir = os.path.join(data_dir, 'fig')
check_path(fig_dir)
# load the cross-correlation matrix from file
corr_mtx = np.load(corr_file, mmap_mode='r')
# set voxel index
if not isinstance(vxl_idx, np.ndarray):
vxl_idx = np.arange(corr_mtx.shape[0])
elif len(vxl_idx) != corr_mtx.shape[0]:
print 'mismatch on voxel number!'
return
else:
print 'voxel index loaded.'
img_size = 55.0
pos_mtx = np.zeros((73728, 2))
pos_mtx[:] = np.nan
for i in range(len(vxl_idx)):
print 'Iter %s of %s' %(i+1, len(vxl_idx)),
tmp = corr_mtx[i, :]
tmp = np.nan_to_num(np.array(tmp))
# significant threshold for one-tail test
tmp[tmp <= 0.019257] = 0
if np.sum(tmp):
mmtx = tmp.reshape(55, 55)
#tmp = tmp.reshape(96, 27, 27)
#mmtx = np.max(tmp, axis=0)
print mmtx.min(), mmtx.max()
if figout:
fig_file = os.path.join(fig_dir, 'v'+str(vxl_idx[i])+'.png')
imsave(fig_file, mmtx)
# get indices of n maximum values
max_n = 20
row_idx, col_idx = np.unravel_index(
np.argsort(mmtx.ravel())[-1*max_n:],
mmtx.shape)
nmtx = np.zeros(mmtx.shape)
nmtx[row_idx, col_idx] = mmtx[row_idx, col_idx]
# center of mass
x, y = ndimage.measurements.center_of_mass(nmtx)
pos_mtx[vxl_idx[i], :] = [x, y]
else:
print ' '
#receptive_field_file = os.path.join(data_dir, 'receptive_field_pos.npy')
#np.save(receptive_field_file, pos_mtx)
#pos_mtx = np.load(receptive_field_file)
# generate retinotopic mapping
base_name = 'train_max' + str(max_n)
prf2visual_angle(pos_mtx, img_size, data_dir, base_name)
def prf2visual_angle(prf_mtx, img_size, out_dir, base_name):
"""Generate retinotopic mapping based on voxels' pRF parameters.
    `prf_mtx` is a #voxel x pRF-feature matrix. The pRF features can be 2
    columns (row, col) of the image, or 3 columns where the third column is
    the pRF size parameter.
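    Example (a minimal sketch; 'prf.npy' and 'out_dir' are placeholders for a
    saved pRF matrix and an output directory):
        prf_mtx = np.load('prf.npy')
        prf2visual_angle(prf_mtx, 55.0, 'out_dir', 'train_max20')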
"""
feature_size = prf_mtx.shape[1]
pos_mtx = prf_mtx[:, :2]
# eccentricity
ecc = retinotopy.coord2ecc(pos_mtx, img_size, 20)
vol = ecc.reshape(18, 64, 64)
vutil.save2nifti(vol, os.path.join(out_dir, base_name+'_ecc.nii.gz'))
# angle
angle = retinotopy.coord2angle(pos_mtx, img_size)
vol = angle.reshape(18, 64, 64)
vutil.save2nifti(vol, os.path.join(out_dir, base_name+'_angle.nii.gz'))
# pRF size
if feature_size > 2:
size_angle = retinotopy.get_prf_size(prf_mtx, 55, 20)
vol = size_angle.reshape(18, 64, 64)
vutil.save2nifti(vol, os.path.join(out_dir, base_name+'_size.nii.gz'))
def pls_y_pred_x(plsca, Y):
"""Predict X based on Y using a trained PLS CCA model `plsca`.
"""
coef_ = np.dot(plsca.y_rotations_, plsca.x_loadings_.T)
coef_ = (1./plsca.y_std_.reshape((plsca.y_weights_.shape[0], 1)) * coef_ *
plsca.x_std_)
# Normalize
Yk = Y - plsca.y_mean_
Yk /= plsca.y_std_
Xpred = np.dot(Y, coef_)
return Xpred + plsca.x_mean_
def plscorr_eval(train_fmri_ts, train_feat_ts, val_fmri_ts, val_feat_ts,
out_dir, mask_file):
"""Compute PLS correlation between brain activity and CNN activation."""
train_feat_ts = train_feat_ts.reshape(-1, train_feat_ts.shape[3]).T
val_feat_ts = val_feat_ts.reshape(-1, val_feat_ts.shape[3]).T
train_fmri_ts = train_fmri_ts.T
val_fmri_ts = val_fmri_ts.T
# Iteration loop for different component number
#for n in range(5, 19):
# print '--- Components number %s ---' %(n)
# plsca = PLSCanonical(n_components=n)
# plsca.fit(train_feat_ts, train_fmri_ts)
# pred_feat_c, pred_fmri_c = plsca.transform(val_feat_ts, val_fmri_ts)
# pred_fmri_ts = plsca.predict(val_feat_ts)
# # calculate correlation coefficient between truth and prediction
# r = corr2_coef(val_fmri_ts.T, pred_fmri_ts.T, mode='pair')
# # get top 20% corrcoef for model evaluation
# vsample = int(np.rint(0.2*len(r)))
# print 'Sample size for evaluation : %s' % (vsample)
# r.sort()
# meanr = np.mean(r[-1*vsample:])
# print 'Mean prediction corrcoef : %s' %(meanr)
# model generation based on optimized CC number
cc_num = 10
plsca = PLSCanonical(n_components=cc_num)
plsca.fit(train_feat_ts, train_fmri_ts)
from sklearn.externals import joblib
joblib.dump(plsca, os.path.join(out_dir, 'plsca_model.pkl'))
plsca = joblib.load(os.path.join(out_dir, 'plsca_model.pkl'))
# calculate correlation coefficient between truth and prediction
pred_fmri_ts = plsca.predict(val_feat_ts)
fmri_pred_r = corr2_coef(val_fmri_ts.T, pred_fmri_ts.T, mode='pair')
mask = vutil.data_swap(mask_file)
vxl_idx = np.nonzero(mask.flatten()==1)[0]
tmp = np.zeros_like(mask.flatten(), dtype=np.float64)
tmp[vxl_idx] = fmri_pred_r
tmp = tmp.reshape(mask.shape)
vutil.save2nifti(tmp, os.path.join(out_dir, 'pred_fmri_r.nii.gz'))
pred_feat_ts = pls_y_pred_x(plsca, val_fmri_ts)
pred_feat_ts = pred_feat_ts.T.reshape(96, 14, 14, 540)
np.save(os.path.join(out_dir, 'pred_feat.npy'), pred_feat_ts)
# get PLS-CCA weights
feat_cc, fmri_cc = plsca.transform(train_feat_ts, train_fmri_ts)
np.save(os.path.join(out_dir, 'feat_cc.npy'), feat_cc)
np.save(os.path.join(out_dir, 'fmri_cc.npy'), fmri_cc)
feat_weight = plsca.x_weights_.reshape(96, 14, 14, cc_num)
#feat_weight = plsca.x_weights_.reshape(96, 11, 11, cc_num)
fmri_weight = plsca.y_weights_
np.save(os.path.join(out_dir, 'feat_weights.npy'), feat_weight)
np.save(os.path.join(out_dir, 'fmri_weights.npy'), fmri_weight)
fmri_orig_ccs = get_pls_components(plsca.y_scores_, plsca.y_loadings_)
np.save(os.path.join(out_dir, 'fmri_orig_ccs.npy'), fmri_orig_ccs)
def inter_subj_cc_sim(subj1_id, subj2_id, subj_dir):
"""Compute inter-subjects CCs similarity."""
subj1_dir = os.path.join(subj_dir, 'vS%s'%(subj1_id))
subj2_dir = os.path.join(subj_dir, 'vS%s'%(subj2_id))
#-- inter-channel similarity
feat_weights_file1 = os.path.join(subj1_dir, 'plscca',
'layer1', 'feat_weights.npy')
feat_weights_file2 = os.path.join(subj2_dir, 'plscca',
'layer1', 'feat_weights.npy')
    feat_cc_corr1 = np.load(feat_weights_file1).reshape(96, 121, 10)
    feat_cc_corr2 = np.load(feat_weights_file2).reshape(96, 121, 10)
sim_mtx = np.zeros((960, 960))
for i in range(10):
data1 = feat_cc_corr1[..., i]
for j in range(10):
data2 = feat_cc_corr2[..., j]
tmp = corr2_coef(data1, data2)
sim_mtx[i*96:(i+1)*96, j*96:(j+1)*96] = np.abs(tmp)
np.save('feat_cc_weights_sim_subj_%s_%s.npy'%(subj1_id, subj2_id), sim_mtx)
#-- inter-CC similarity
#feat_cc_corr_file1 = os.path.join(subj1_dir, 'plscca',
# 'layer1', 'feat_cc_corr.npy')
#feat_cc_corr_file2 = os.path.join(subj2_dir, 'plscca',
# 'layer1', 'feat_cc_corr.npy')
#feat_cc_corr1 = np.load(feat_cc_corr_file1).reshape(96, 11, 11, 10)
#feat_cc_corr2 = np.load(feat_cc_corr_file2).reshape(96, 11, 11, 10)
#avg_weights1 = vutil.fweights_top_mean(feat_cc_corr1, 0.2)
#avg_weights2 = vutil.fweights_top_mean(feat_cc_corr2, 0.2)
#sim_mtx = corr2_coef(avg_weights1, avg_weights2)
#np.save('feat_cc_sim_subj_%s_%s.npy'%(subj1_id, subj2_id), sim_mtx)
pass
if __name__ == '__main__':
"""Main function."""
# config parser
cf = configParser.Config('config')
# directory config for database
db_dir = cf.get('database', 'path')
db_dir = os.path.join(db_dir, 'vim2')
# directory config for analysis
root_dir = cf.get('base', 'path')
feat_dir = os.path.join(root_dir, 'sfeatures', 'vim2')
res_dir = os.path.join(root_dir, 'subjects')
#-- general config config
subj_id = 1
subj_dir = os.path.join(res_dir, 'vim2_S%s'%(subj_id))
pls_dir = os.path.join(subj_dir, 'pls')
prf_dir = os.path.join(subj_dir, 'prf', 'gaussian_kernel')
check_path(pls_dir)
#-- load fmri data
fmri_file = os.path.join(prf_dir, 'roi_orig_fmri.npz')
fmri_data = np.load(fmri_file)
#-- load gabor feats: data.shape = (x, y, feature_size, 7200/540)
train_feat_file = os.path.join(feat_dir, 'train_gabor_trs_scale.npy')
train_feat_ts = np.load(train_feat_file, mmap_mode='r')
val_feat_file = os.path.join(feat_dir, 'val_gabor_trs_scale.npy')
val_feat_ts = np.load(val_feat_file, mmap_mode='r')
#-- PLS regression
train_feat = train_feat_ts.reshape(-1, 7200).T
train_fmri = fmri_data['train_ts'].T
print 'PLS model initializing ...'
comps = 20
pls2 = PLSRegression(n_components=comps)
pls2.fit(train_fmri, train_feat)
#pls2.fit(train_feat, train_fmri)
joblib.dump(pls2, os.path.join(pls_dir, 'pls_model_c%s.pkl'%(comps)))
for i in range(comps):
print 'Component %s'%(i+1)
print np.corrcoef(pls2.x_scores_[:, i], pls2.y_scores_[:, i])
# get predicted fmri response based on PLS model
#pred_train_fmri = pls_regression_predict(pls2, train_feat)
pred_train_feat = pls_regression_predict(pls2, train_fmri)
#pred_val_fmri = pls_regression_predict(pls2, val_feat_ts.reshape(-1, 540).T)
pred_val_feat = pls_regression_predict(pls2, fmri_data['val_ts'].T)
pred_file = os.path.join(pls_dir, 'pls_pred_feat_c%s'%(comps))
np.savez(pred_file, pred_train=pred_train_feat, pred_val=pred_val_feat)
#-- visualize PLS weights
#pls_model_file = os.path.join(pls_dir, 'pls_model_c20.pkl')
#pls2 = joblib.load(pls_model_file)
# plot feature weights for each component of PLS
#xwts = pls2.x_weights_.reshape(128, 128, 5, -1)
#vutil.plot_pls_fweights(xwts, pls_dir, 'feat_weights')
# save fmri weights for each component of PLS as nifti file
#vxl_idx = fmri_data['vxl_idx']
#ywts = pls2.y_weights_
#for c in range(ywts.shape[1]):
# vxl_data = ywts[:, c]
# outfile = os.path.join(pls_dir, 'fmri_weights_C%s.nii.gz'%(c+1))
# vutil.vxl_data2nifti(vxl_data, vxl_idx, outfile)
# compute corrcoef of local ans global brain activity
| bsd-3-clause |
3manuek/scikit-learn | sklearn/metrics/metrics.py | 233 | 1262 | import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
| bsd-3-clause |
mwv/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 251 | 1895 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
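# A small check of the Vandermonde-style design matrix described in the
# docstring above: PolynomialFeatures expands each 1d point x_i into the row
# [1, x_i, x_i**2, ...], which Ridge then fits linearly. The names
# `example_degree` and `vandermonde` are introduced only for this sketch.
example_degree = 3
vandermonde = PolynomialFeatures(example_degree).fit_transform(X)
assert vandermonde.shape == (X.shape[0], example_degree + 1)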
| bsd-3-clause |
awni/tensorflow | tensorflow/contrib/skflow/python/skflow/tests/test_nonlinear.py | 1 | 5127 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from sklearn import datasets
from sklearn.metrics import accuracy_score, mean_squared_error
import tensorflow as tf
from tensorflow.contrib.skflow.python import skflow
class NonLinearTest(tf.test.TestCase):
def testIrisDNN(self):
random.seed(42)
iris = datasets.load_iris()
classifier = skflow.TensorFlowDNNClassifier(
hidden_units=[10, 20, 10], n_classes=3)
classifier.fit(iris.data, iris.target)
score = accuracy_score(iris.target, classifier.predict(iris.data))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
weights = classifier.weights_
self.assertEqual(weights[0].shape, (4, 10))
self.assertEqual(weights[1].shape, (10, 20))
self.assertEqual(weights[2].shape, (20, 10))
self.assertEqual(weights[3].shape, (10, 3))
biases = classifier.bias_
self.assertEqual(len(biases), 4)
def testBostonDNN(self):
random.seed(42)
boston = datasets.load_boston()
regressor = skflow.TensorFlowDNNRegressor(
hidden_units=[10, 20, 10], n_classes=0,
batch_size=boston.data.shape[0],
steps=200, learning_rate=0.001)
regressor.fit(boston.data, boston.target)
score = mean_squared_error(
boston.target, regressor.predict(boston.data))
self.assertLess(score, 100, "Failed with score = {0}".format(score))
weights = regressor.weights_
self.assertEqual(weights[0].shape, (13, 10))
self.assertEqual(weights[1].shape, (10, 20))
self.assertEqual(weights[2].shape, (20, 10))
self.assertEqual(weights[3].shape, (10, 1))
biases = regressor.bias_
self.assertEqual(len(biases), 4)
def testRNN(self):
random.seed(42)
import numpy as np
data = np.array(list([[2, 1, 2, 2, 3],
[2, 2, 3, 4, 5],
[3, 3, 1, 2, 1],
[2, 4, 5, 4, 1]]), dtype=np.float32)
# labels for classification
labels = np.array(list([1, 0, 1, 0]), dtype=np.float32)
# targets for regression
targets = np.array(list([10, 16, 10, 16]), dtype=np.float32)
test_data = np.array(list([[1, 3, 3, 2, 1], [2, 3, 4, 5, 6]]))
def input_fn(X):
return tf.split(1, 5, X)
# Classification
classifier = skflow.TensorFlowRNNClassifier(
rnn_size=2, cell_type='lstm', n_classes=2, input_op_fn=input_fn)
classifier.fit(data, labels)
classifier.weights_
classifier.bias_
predictions = classifier.predict(test_data)
self.assertAllClose(predictions, np.array([1, 0]))
classifier = skflow.TensorFlowRNNClassifier(
rnn_size=2, cell_type='rnn', n_classes=2,
input_op_fn=input_fn, num_layers=2)
classifier.fit(data, labels)
classifier = skflow.TensorFlowRNNClassifier(
rnn_size=2, cell_type='invalid_cell_type', n_classes=2,
input_op_fn=input_fn, num_layers=2)
with self.assertRaises(ValueError):
classifier.fit(data, labels)
# Regression
regressor = skflow.TensorFlowRNNRegressor(
rnn_size=2, cell_type='gru', input_op_fn=input_fn)
regressor.fit(data, targets)
regressor.weights_
regressor.bias_
predictions = regressor.predict(test_data)
def testBidirectionalRNN(self):
random.seed(42)
import numpy as np
data = np.array(list([[2, 1, 2, 2, 3],
[2, 2, 3, 4, 5],
[3, 3, 1, 2, 1],
[2, 4, 5, 4, 1]]), dtype=np.float32)
labels = np.array(list([1, 0, 1, 0]), dtype=np.float32)
def input_fn(X):
return tf.split(1, 5, X)
# Classification
classifier = skflow.TensorFlowRNNClassifier(
rnn_size=2, cell_type='lstm', n_classes=2, input_op_fn=input_fn,
bidirectional=True)
classifier.fit(data, labels)
predictions = classifier.predict(np.array(list([[1, 3, 3, 2, 1],
[2, 3, 4, 5, 6]])))
self.assertAllClose(predictions, np.array([1, 0]))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
rcomer/iris | docs/gallery_code/oceanography/plot_load_nemo.py | 3 | 2120 | """
Load a Time Series of Data From the NEMO Model
==============================================
This example demonstrates how to load multiple files containing data output by
the NEMO model and combine them into a time series in a single cube. The
different time dimensions in these files can prevent Iris from concatenating
them without the intervention shown here.
"""
from __future__ import unicode_literals
import matplotlib.pyplot as plt
import iris
import iris.plot as iplt
import iris.quickplot as qplt
from iris.util import promote_aux_coord_to_dim_coord
def main():
# Load the three files of sample NEMO data.
fname = iris.sample_data_path("NEMO/nemo_1m_*.nc")
cubes = iris.load(fname)
# Some attributes are unique to each file and must be blanked
# to allow concatenation.
differing_attrs = ["file_name", "name", "timeStamp", "TimeStamp"]
for cube in cubes:
for attribute in differing_attrs:
cube.attributes[attribute] = ""
# The cubes still cannot be concatenated because their time dimension is
# time_counter rather than time. time needs to be promoted to allow
# concatenation.
for cube in cubes:
promote_aux_coord_to_dim_coord(cube, "time")
# The cubes can now be concatenated into a single time series.
cube = cubes.concatenate_cube()
# Generate a time series plot of a single point
plt.figure()
y_point_index = 100
x_point_index = 100
qplt.plot(cube[:, y_point_index, x_point_index], "o-")
# Include the point's position in the plot's title
lat_point = cube.coord("latitude").points[y_point_index, x_point_index]
lat_string = "{:.3f}\u00B0 {}".format(
abs(lat_point), "N" if lat_point > 0.0 else "S"
)
lon_point = cube.coord("longitude").points[y_point_index, x_point_index]
lon_string = "{:.3f}\u00B0 {}".format(
abs(lon_point), "E" if lon_point > 0.0 else "W"
)
plt.title(
"{} at {} {}".format(
cube.long_name.capitalize(), lat_string, lon_string
)
)
iplt.show()
if __name__ == "__main__":
main()
| lgpl-3.0 |
Unidata/MetPy | v0.4/_downloads/Point_Interpolation.py | 3 | 5082 | # Copyright (c) 2008-2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Point Interpolation
===================
Compares different point interpolation approaches.
"""
import cartopy
import cartopy.crs as ccrs
from matplotlib.colors import BoundaryNorm
import matplotlib.pyplot as plt
import numpy as np
from metpy.cbook import get_test_data
from metpy.gridding.gridding_functions import (interpolate, remove_nan_observations,
remove_repeat_coordinates)
###########################################
def basic_map(map_proj):
"""Make our basic default map for plotting"""
fig = plt.figure(figsize=(15, 10))
    view = fig.add_axes([0, 0, 1, 1], projection=map_proj)
view.set_extent([-120, -70, 20, 50])
view.add_feature(cartopy.feature.NaturalEarthFeature(category='cultural',
name='admin_1_states_provinces_lakes',
scale='50m', facecolor='none'))
view.add_feature(cartopy.feature.OCEAN)
view.add_feature(cartopy.feature.COASTLINE)
view.add_feature(cartopy.feature.BORDERS, linestyle=':')
return view
def station_test_data(variable_names, proj_from=None, proj_to=None):
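    """Load the sample station file and return longitudes, latitudes and the
    requested variable(s), optionally re-projecting the coordinates from
    ``proj_from`` to ``proj_to``."""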
f = get_test_data('station_data.txt')
all_data = np.loadtxt(f, skiprows=1, delimiter=',',
usecols=(1, 2, 3, 4, 5, 6, 7, 17, 18, 19),
dtype=np.dtype([('stid', '3S'), ('lat', 'f'), ('lon', 'f'),
('slp', 'f'), ('air_temperature', 'f'),
('cloud_fraction', 'f'), ('dewpoint', 'f'),
('weather', '16S'),
('wind_dir', 'f'), ('wind_speed', 'f')]))
all_stids = [s.decode('ascii') for s in all_data['stid']]
data = np.concatenate([all_data[all_stids.index(site)].reshape(1, ) for site in all_stids])
value = data[variable_names]
lon = data['lon']
lat = data['lat']
if proj_from is not None and proj_to is not None:
try:
proj_points = proj_to.transform_points(proj_from, lon, lat)
return proj_points[:, 0], proj_points[:, 1], value
except Exception as e:
print(e)
return None
return lon, lat, value
from_proj = ccrs.Geodetic()
to_proj = ccrs.AlbersEqualArea(central_longitude=-97.0000, central_latitude=38.0000)
levels = list(range(-20, 20, 1))
cmap = plt.get_cmap('magma')
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
x, y, temp = station_test_data('air_temperature', from_proj, to_proj)
x, y, temp = remove_nan_observations(x, y, temp)
x, y, temp = remove_repeat_coordinates(x, y, temp)
###########################################
# Scipy.interpolate linear
# ------------------------
gx, gy, img = interpolate(x, y, temp, interp_type='linear', hres=75000)
img = np.ma.masked_where(np.isnan(img), img)
view = basic_map(to_proj)
mmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm)
plt.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)
###########################################
# Natural neighbor interpolation (MetPy implementation)
# -----------------------------------------------------
# `Reference <https://github.com/Unidata/MetPy/files/138653/cwp-657.pdf>`_
gx, gy, img = interpolate(x, y, temp, interp_type='natural_neighbor', hres=75000)
img = np.ma.masked_where(np.isnan(img), img)
view = basic_map(to_proj)
mmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm)
plt.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)
###########################################
# Cressman interpolation
# ----------------------
# search_radius = 100 km
#
# grid resolution = 75 km
#
# min_neighbors = 1
gx, gy, img = interpolate(x, y, temp, interp_type='cressman', minimum_neighbors=1, hres=75000,
search_radius=100000)
img = np.ma.masked_where(np.isnan(img), img)
view = basic_map(to_proj)
mmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm)
plt.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)
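# For reference, the Cressman scheme weights each observation inside the
# search radius R by w = (R**2 - r**2) / (R**2 + r**2), where r is the distance
# from the grid point to the observation. A small stand-alone sketch of that
# weight function follows (illustrative only, not MetPy's implementation):
def cressman_weight_sketch(dist, search_radius):
    """Illustrative Cressman weights; zero outside the search radius."""
    dist = np.asarray(dist, dtype=float)
    w = (search_radius ** 2 - dist ** 2) / (search_radius ** 2 + dist ** 2)
    return np.where(dist < search_radius, w, 0.0)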
###########################################
# Barnes Interpolation
# --------------------
# search_radius = 100km
#
# min_neighbors = 3
gx, gy, img1 = interpolate(x, y, temp, interp_type='barnes', hres=75000, search_radius=100000)
img1 = np.ma.masked_where(np.isnan(img1), img1)
view = basic_map(to_proj)
mmb = view.pcolormesh(gx, gy, img1, cmap=cmap, norm=norm)
plt.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)
###########################################
# Radial basis function interpolation
# ------------------------------------
# linear
gx, gy, img = interpolate(x, y, temp, interp_type='rbf', hres=75000, rbf_func='linear',
rbf_smooth=0)
img = np.ma.masked_where(np.isnan(img), img)
view = basic_map(to_proj)
mmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm)
plt.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)
plt.show()
| bsd-3-clause |
dsquareindia/scikit-learn | examples/feature_stacker.py | 80 | 1911 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
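# A short follow-up sketch: after fitting, the grid search also exposes the
# winning parameter combination and its cross-validated score.
print(grid_search.best_params_)
print(grid_search.best_score_)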
| bsd-3-clause |
stelfrich/openmicroscopy | components/tools/OmeroPy/test/unit/test_jvmcfg.py | 2 | 7569 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2015 Glencoe Software, Inc. All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test of the automatic JVM setting logic for OMERO startup.
"""
import pytest
from omero.config import ConfigXml, xml
from omero.install.jvmcfg import adjust_settings
from omero.install.jvmcfg import ManualStrategy
from omero.install.jvmcfg import PercentStrategy
from omero.install.jvmcfg import Settings
from omero.install.jvmcfg import Strategy
from omero.install.jvmcfg import strip_dict
from omero.install.jvmcfg import usage_charts
from omero.util.temp_files import create_path
from path import path
from xml.etree.ElementTree import SubElement
from xml.etree.ElementTree import tostring
from xml.etree.ElementTree import XML
from test.unit.test_config import initial
def write_config(data):
p = create_path()
i = initial()
for k, v in data.items():
for x in i[0:2]: # __ACTIVE__ & default
SubElement(x, "property", name=k, value=v)
string = tostring(i, 'utf-8')
txt = xml.dom.minidom.parseString(string).toprettyxml(" ", "\n", None)
p.write_text(txt)
return p
class TestMemoryStrip(object):
def test_1(self):
rv = strip_dict({"a.b": "c"}, prefix="a")
assert {"b": "c"} == rv
def test_2(self):
rv = strip_dict({"a.b.c": "d"}, prefix="a.b")
assert rv["c"] == "d"
def test_3(self):
rv = strip_dict({
"omero.jvmcfg.foo": "a",
"something.else": "b"})
assert rv["foo"] == "a"
assert "something.else" not in rv
@pytest.mark.parametrize("input,output", (
({"omero.jvmcfg.heap_size.blitz": "1g"}, {"heap_size": "1g"}),
))
def test_4(self, input, output):
p = write_config(input)
config = ConfigXml(filename=str(p), env_config="default")
try:
m = config.as_map()
s = strip_dict(m, suffix="blitz")
assert s == output
finally:
config.close()
def test_5(self):
rv = strip_dict({
"omero.jvmcfg.a.blitz": "b",
}, suffix="blitz")
assert rv["a"] == "b"
class TestSettings(object):
def test_initial(self):
s = Settings()
assert s.perm_gen == "128m"
assert s.heap_dump == "off"
assert s.heap_size == "512m"
def test_explicit(self):
s = Settings({
"perm_gen": "xxx",
"heap_dump": "yyy",
"heap_size": "zzz",
})
assert s.perm_gen == "xxx"
assert s.heap_dump == "yyy"
assert s.heap_size == "zzz"
def test_defaults(self):
s = Settings({}, {
"perm_gen": "xxx",
"heap_dump": "yyy",
"heap_size": "zzz",
})
assert s.perm_gen == "xxx"
assert s.heap_dump == "yyy"
assert s.heap_size == "zzz"
def test_both(self):
s = Settings({
"perm_gen": "aaa",
"heap_dump": "bbb",
"heap_size": "ccc",
}, {
"perm_gen": "xxx",
"heap_dump": "yyy",
"heap_size": "zzz",
})
assert s.perm_gen == "aaa"
assert s.heap_dump == "bbb"
assert s.heap_size == "ccc"
class TestStrategy(object):
def test_no_instantiate(self):
with pytest.raises(Exception):
Strategy("blitz")
def test_hard_coded(self):
strategy = ManualStrategy("blitz")
settings = strategy.get_memory_settings()
assert settings == [
"-Xmx512m",
"-XX:MaxPermSize=128m",
"-XX:+IgnoreUnrecognizedVMOptions",
]
def test_percent_usage(self):
strategy = PercentStrategy("blitz")
table = list(strategy.usage_table(15, 16))[0]
assert table[0] == 2**15
assert table[1] == 2**15*15/100
def test_heap_dump_on(self):
settings = Settings({"heap_dump": "on"})
strategy = PercentStrategy("blitz", settings)
hd = strategy.get_heap_dump()
append = strategy.get_append()
assert " " not in hd
assert "HeapDumpPath" not in hd
assert not append
def test_heap_dump_tmp(self):
settings = Settings({"heap_dump": "tmp"})
strategy = PercentStrategy("blitz", settings)
hd = strategy.get_heap_dump()
append = strategy.get_append()
assert " " not in hd
assert "HeapDumpPath" not in hd
assert "HeapDumpPath" in "".join(append)
class AdjustFixture(object):
def __init__(self, input, output, name, **kwargs):
self.input = input
self.output = output
self.name = name
self.kwargs = kwargs
def validate(self, rv):
for k, v in self.output.items():
assert k in rv
found = rv[k]
found.pop(0) # settings
assert v == found, "%s.%s: %s <> %s" % (self.name, k,
v, found)
import json
f = open(__file__[:-3] + ".json", "r")
data = json.load(f)
AFS = []
for x in data:
AFS.append(AdjustFixture(x["input"], x["output"], x["name"]))
def template_xml():
templates = path(__file__) / ".." / ".." / ".."
templates = templates / ".." / ".." / ".."
templates = templates / "etc" / "grid" / "templates.xml"
templates = templates.abspath()
return XML(templates.text())
class TestAdjustStrategy(object):
@pytest.mark.parametrize("fixture", AFS, ids=[x.name for x in AFS])
def test_adjust(self, fixture, monkeypatch):
monkeypatch.setattr(Strategy, '_system_memory_mb_java',
lambda x: (2000, 4000))
p = write_config(fixture.input)
xml = template_xml()
config = ConfigXml(filename=str(p), env_config="default")
try:
rv = adjust_settings(config, xml, **fixture.kwargs)
fixture.validate(rv)
finally:
config.close()
@pytest.mark.parametrize("fixture", AFS, ids=[x.name for x in AFS])
def test_12527(self, fixture, monkeypatch):
monkeypatch.setattr(Strategy, '_system_memory_mb_java',
lambda x: (2000, 4000))
p = write_config(fixture.input)
old_templates = path(__file__).dirname() / "old_templates.xml"
xml = XML(old_templates.abspath().text())
config = ConfigXml(filename=str(p), env_config="default")
with pytest.raises(Exception):
adjust_settings(config, xml, **fixture.kwargs)
class TestChart(object):
def test_percent_chart(self):
try:
usage_charts("target/charts.png")
except ImportError:
# Requires matplotlib, etc
pass
| gpl-2.0 |
compas-dev/compas | src/compas_plotters/meshplotter.py | 1 | 12711 | from matplotlib.patches import Circle
from matplotlib.patches import Polygon
from compas.utilities import color_to_rgb
from compas.utilities import pairwise
from compas_plotters._plotter import BasePlotter, valuedict
__all__ = ['MeshPlotter']
class MeshPlotter(BasePlotter):
"""Plotter for the visualization of COMPAS meshes.
Parameters
----------
mesh: object
The mesh to plot.
Attributes
----------
title : str
Title of the plot.
mesh : object
The mesh to plot.
vertexcollection : object
The matplotlib collection for the mesh vertices.
edgecollection : object
The matplotlib collection for the mesh edges.
facecollection : object
The matplotlib collection for the mesh faces.
defaults : dict
Dictionary containing default attributes for vertices and edges.
Examples
--------
This is a basic example using the default settings for all visualization options.
For more detailed examples, see the documentation of the various drawing methods
listed below...
.. plot::
:include-source:
import compas
from compas.datastructures import Mesh
from compas_plotters import MeshPlotter
mesh = Mesh.from_obj(compas.get('faces.obj'))
plotter = MeshPlotter(mesh)
plotter.draw_vertices(text='key', radius=0.15)
plotter.draw_edges()
plotter.draw_faces()
plotter.show()
Notes
-----
For more info about ``matplotlib``, see [1]_.
References
----------
.. [1] Hunter, J. D., 2007. *Matplotlib: A 2D graphics environment*.
Computing In Science & Engineering (9) 3, p.90-95.
Available at: http://ieeexplore.ieee.org/document/4160265/citations.
"""
def __init__(self, mesh, **kwargs):
super().__init__(**kwargs)
self.title = 'MeshPlotter'
self.mesh = mesh
self.vertexcollection = None
self.edgecollection = None
self.facecollection = None
self.defaults = {
'vertex.radius': 0.1,
'vertex.facecolor': '#ffffff',
'vertex.edgecolor': '#000000',
'vertex.edgewidth': 0.5,
'vertex.textcolor': '#000000',
'vertex.fontsize': kwargs.get('fontsize', 10),
'edge.width': 1.0,
'edge.color': '#000000',
'edge.textcolor': '#000000',
'edge.fontsize': kwargs.get('fontsize', 10),
'face.facecolor': '#eeeeee',
'face.edgecolor': '#000000',
'face.edgewidth': 0.1,
'face.textcolor': '#000000',
'face.fontsize': kwargs.get('fontsize', 10),
}
def clear(self):
"""Clears the mesh plotter vertices, edges and faces."""
self.clear_vertices()
self.clear_edges()
self.clear_faces()
def draw_vertices(self, keys=None, radius=None, text=None,
facecolor=None, edgecolor=None, edgewidth=None,
textcolor=None, fontsize=None, picker=None):
"""Draws the mesh vertices.
Parameters
----------
keys : list
The keys of the vertices to plot.
        radius : {float, dict}
            The radius of the vertex circles, either as a single value for all
            vertices or as a dictionary mapping vertex keys to radii.
text : {{'index', 'key'}, str, dict}
Strings to be displayed on the vertices.
facecolor : {color, dict}
Color for the vertex circle fill.
edgecolor : {color, dict}
Color for the vertex circle edge.
edgewidth : {float, dict}
Width for the vertex circle edge.
textcolor : {color, dict}
Color for the text to be displayed on the vertices.
fontsize : {int, dict}
Font size for the text to be displayed on the vertices.
Returns
-------
object
The matplotlib vertex collection object.
"""
keys = keys or list(self.mesh.vertices())
if text == 'key':
text = {key: str(key) for key in self.mesh.vertices()}
elif text == 'index':
text = {key: str(index) for index, key in enumerate(self.mesh.vertices())}
elif isinstance(text, str):
if text in self.mesh.default_vertex_attributes:
default = self.mesh.default_vertex_attributes[text]
if isinstance(default, float):
text = {key: '{:.1f}'.format(attr[text]) for key, attr in self.mesh.vertices(True)}
else:
text = {key: str(attr[text]) for key, attr in self.mesh.vertices(True)}
radiusdict = valuedict(keys, radius, self.defaults['vertex.radius'])
textdict = valuedict(keys, text, '')
facecolordict = valuedict(keys, facecolor, self.defaults['vertex.facecolor'])
edgecolordict = valuedict(keys, edgecolor, self.defaults['vertex.edgecolor'])
edgewidthdict = valuedict(keys, edgewidth, self.defaults['vertex.edgewidth'])
textcolordict = valuedict(keys, textcolor, self.defaults['vertex.textcolor'])
fontsizedict = valuedict(keys, fontsize, self.defaults['vertex.fontsize'])
points = []
for key in keys:
points.append({
'pos': self.mesh.vertex_coordinates(key, 'xy'),
'radius': radiusdict[key],
'text': textdict[key],
'facecolor': facecolordict[key],
'edgecolor': edgecolordict[key],
'edgewidth': edgewidthdict[key],
'textcolor': textcolordict[key],
'fontsize': fontsizedict[key]
})
collection = self.draw_points(points)
self.vertexcollection = collection
if picker:
collection.set_picker(picker)
return collection
def clear_vertices(self):
"""Clears the mesh plotter vertices."""
if self.vertexcollection:
self.vertexcollection.remove()
def update_vertices(self, radius=None):
"""Updates the plotter vertex collection based on the current state of the mesh.
Parameters
----------
radius : {float, dict}, optional
The vertex radius as a single value, which will be applied to all vertices,
or as a dictionary mapping vertex keys to specific radii.
Default is the value set in ``self.defaults``.
"""
radius = valuedict(self.mesh.vertices(), radius, self.defaults['vertex.radius'])
circles = []
for key in self.mesh.vertices():
c = self.mesh.vertex_coordinates(key, 'xy')
r = radius[key]
circles.append(Circle(c, r))
self.vertexcollection.set_paths(circles)
def draw_edges(self, keys=None, width=None, color=None, text=None, textcolor=None, fontsize=None):
"""Draws the mesh edges.
Parameters
----------
keys : list
The keys of the edges to plot.
width : {float, dict}
Width of the mesh edges.
color : {color, dict}
Color for the edge lines.
text : {{'index', 'key'}, str, dict}
Strings to be displayed on the edges.
        textcolor : {color, dict}
            Color for the text to be displayed on the edges.
        fontsize : {int, dict}
            Font size for the text to be displayed on the edges.
Returns
-------
object
The matplotlib edge collection object.
"""
keys = keys or list(self.mesh.edges())
if text == 'key':
text = {(u, v): '{}-{}'.format(u, v) for u, v in self.mesh.edges()}
elif text == 'index':
text = {(u, v): str(index) for index, (u, v) in enumerate(self.mesh.edges())}
else:
pass
widthdict = valuedict(keys, width, self.defaults['edge.width'])
colordict = valuedict(keys, color, self.defaults['edge.color'])
textdict = valuedict(keys, text, '')
textcolordict = valuedict(keys, textcolor, self.defaults['edge.textcolor'])
fontsizedict = valuedict(keys, fontsize, self.defaults['edge.fontsize'])
lines = []
for u, v in keys:
lines.append({
'start': self.mesh.vertex_coordinates(u, 'xy'),
'end': self.mesh.vertex_coordinates(v, 'xy'),
'width': widthdict[(u, v)],
'color': colordict[(u, v)],
'text': textdict[(u, v)],
'textcolor': textcolordict[(u, v)],
'fontsize': fontsizedict[(u, v)]
})
collection = self.draw_lines(lines)
self.edgecollection = collection
return collection
def clear_edges(self):
"""Clears the mesh plotter edges."""
if self.edgecollection:
self.edgecollection.remove()
def update_edges(self):
"""Updates the plotter edge collection based on the mesh."""
segments = []
for u, v in self.mesh.edges():
segments.append([self.mesh.vertex_coordinates(u, 'xy'), self.mesh.vertex_coordinates(v, 'xy')])
self.edgecollection.set_segments(segments)
def highlight_path(self, path, edgecolor=None, edgetext=None, edgewidth=None):
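        """Highlight a path on the mesh by drawing its edges as lines.
        Parameters
        ----------
        path : list
            An ordered list of vertex keys describing the path.
        edgecolor : color, optional
            Color for the highlighted edges.
        edgetext : dict, optional
            Currently unused.
        edgewidth : float, optional
            Width for the highlighted edges.
        """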
lines = []
for u, v in pairwise(path):
sp = self.mesh.vertex_coordinates(u, 'xy')
ep = self.mesh.vertex_coordinates(v, 'xy')
lines.append({
'start': sp,
'end': ep,
'width': edgewidth or self.defaults.get('edge.width', 2.0),
'color': edgecolor or self.defaults.get('edge.color', '#ff0000')
})
self.draw_lines(lines)
def draw_faces(self, keys=None, text=None,
facecolor=None, edgecolor=None, edgewidth=None, textcolor=None, fontsize=None):
"""Draws the mesh faces.
Parameters
----------
keys : list
            The keys of the faces to plot.
        text : {{'index', 'key'}, str, dict}
            Strings to be displayed on the faces.
facecolor : {color, dict}
Color for the face fill.
edgecolor : {color, dict}
Color for the face edge.
edgewidth : {float, dict}
Width for the face edge.
textcolor : {color, dict}
            Color for the text to be displayed on the faces.
        fontsize : {int, dict}
            Font size for the text to be displayed on the faces.
Returns
-------
object
The matplotlib face collection object.
"""
keys = keys or list(self.mesh.faces())
if text == 'key':
text = {key: str(key) for key in self.mesh.faces()}
elif text == 'index':
text = {key: str(index) for index, key in enumerate(self.mesh.faces())}
else:
pass
textdict = valuedict(keys, text, '')
facecolordict = valuedict(keys, facecolor, self.defaults['face.facecolor'])
edgecolordict = valuedict(keys, edgecolor, self.defaults['face.edgecolor'])
edgewidthdict = valuedict(keys, edgewidth, self.defaults['face.edgewidth'])
textcolordict = valuedict(keys, textcolor, self.defaults['face.textcolor'])
fontsizedict = valuedict(keys, fontsize, self.defaults['face.fontsize'])
polygons = []
for key in keys:
polygons.append({
'points': self.mesh.face_coordinates(key, 'xy'),
'text': textdict[key],
'facecolor': facecolordict[key],
'edgecolor': edgecolordict[key],
'edgewidth': edgewidthdict[key],
'textcolor': textcolordict[key],
'fontsize': fontsizedict[key]
})
collection = self.draw_polygons(polygons)
self.facecollection = collection
return collection
def clear_faces(self):
"""Clears the mesh plotter faces."""
if self.facecollection:
self.facecollection.remove()
def update_faces(self, facecolor=None):
"""Updates the plotter face collection based on the mesh."""
facecolor = valuedict(self.mesh.faces(), facecolor, self.defaults['face.facecolor'])
polygons = []
facecolors = []
for fkey in self.mesh.faces():
points = self.mesh.face_coordinates(fkey, 'xy')
polygons.append(Polygon(points))
facecolors.append(color_to_rgb(facecolor[fkey], normalize=True))
self.facecollection.set_paths(polygons)
self.facecollection.set_facecolor(facecolors)
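# A minimal dynamic-update sketch (assuming ``mesh`` is an existing COMPAS
# mesh whose vertex coordinates are modified between redraws):
#
#     plotter = MeshPlotter(mesh)
#     plotter.draw_vertices(radius=0.1)
#     plotter.draw_edges()
#     plotter.draw_faces()
#     # ... modify the mesh geometry ...
#     plotter.update_vertices(radius=0.1)
#     plotter.update_edges()
#     plotter.update_faces()
#     plotter.show()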
| mit |
thaihungle/deepexp | ntm-mann/run_mimic.py | 1 | 16459 | from mimic_gen import MimicDataLoader
import tensorflow as tf
import argparse
import numpy as np
from mimic_model import MimicModel
from tensorflow.python import debug as tf_debug
import os
import time
import mimic_prepare
from sklearn.metrics import precision_recall_fscore_support as score
import utils
import random
from sklearn import metrics
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--mode', default="train")
parser.add_argument('--w_strategy',default="MRUA", help='LRUA or MRUA or LMRUA')
parser.add_argument('--restore_training', default=False, type=bool)
parser.add_argument('--data_dir',default='./data/mimic/small')
parser.add_argument('--debug', default=False, type=bool)
parser.add_argument('--seq_length', default=100, type=int)
parser.add_argument('--validation_length', default=20, type=int)
parser.add_argument('--label_type', default="one_hot", help='one_hot or five_hot')
parser.add_argument('--model', default="MANN2", help='LSTM, MANN, MANN2 or NTM')
parser.add_argument('--read_head_num', default=4, type=int)
parser.add_argument('--batch_size', default=1, type=int)
parser.add_argument('--num_epoches', default=110000, type=int)
parser.add_argument('--num_train_epoches', default=100000, type=int)
parser.add_argument('--learning_rate', default=1e-3, type=float)
parser.add_argument('--rnn_size', default=200, type=int)
parser.add_argument('--rnn_num_layers', default=1, type=int)
parser.add_argument('--memory_size', default=128, type=int)
parser.add_argument('--memory_vector_dim', default=40, type=int)
parser.add_argument('--shift_range', default=1, type=int, help='Only for model=NTM')
parser.add_argument('--write_head_num', default=1, type=int, help='Only for model=NTM. For MANN #(write_head) = #(read_head)')
parser.add_argument('--reuse', default=False, type=bool)
parser.add_argument('--multi_label', default=False, type=bool)
parser.add_argument('--save_dir', default='./save/mimic2')
parser.add_argument('--tensorboard_dir', default='./summary/mimic2')
args = parser.parse_args()
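    # Example invocation (hypothetical flag values, shown only for context):
    #   python run_mimic.py --mode train --model MANN2 --w_strategy LRUA \
    #       --data_dir ./data/mimic/small --seq_length 100 --batch_size 1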
if not os.path.isdir(args.tensorboard_dir):
os.mkdir(args.tensorboard_dir)
if not os.path.isdir(args.save_dir):
os.mkdir(args.save_dir)
if args.mode == 'train':
train(args)
def train(args):
print ('TRAINING PROCESS')
print ('build model ...')
data_loader = MimicDataLoader(
args.data_dir, batch_size=args.batch_size, max_sequence=args.seq_length, split=0.5, train_keep=0.1
)
args.n_classes = data_loader.output_size
args.input_dim = data_loader.input_size
model = MimicModel(args)
print('done build model!')
with tf.Session() as sess:
if args.debug:
sess = tf_debug.LocalCLIDebugWrapperSession(sess)
if args.restore_training:
saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(args.save_dir + '/' + args.model+ '/' + args.w_strategy)
saver.restore(sess, ckpt.model_checkpoint_path)
else:
saver = tf.train.Saver(tf.global_variables())
tf.global_variables_initializer().run()
train_report = args.tensorboard_dir + '/train_report/'
if not os.path.isdir(train_report):
os.mkdir(train_report)
train_report += args.model
if not os.path.isdir(train_report):
os.mkdir(train_report)
train_writer = tf.summary.FileWriter(train_report+'/'+args.w_strategy+'/', sess.graph)
test_report = args.tensorboard_dir + '/test_report/'
if not os.path.isdir(test_report):
os.mkdir(test_report)
test_report += args.model
if not os.path.isdir(test_report):
os.mkdir(test_report)
test_writer = tf.summary.FileWriter(test_report + '/'+args.w_strategy+'/', sess.graph)
print(args)
best_fscore = 0
best_acc1= best_acc5= best_acc10=0
        if args.reuse and args.model == 'MANN2':  # make it persistent
# try:
with tf.variable_scope("init", reuse=True):
M = tf.get_variable('M', shape=[args.batch_size, args.memory_size, args.memory_vector_dim])
updateM_placeholder = tf.placeholder(M.dtype, shape=M.get_shape())
updateM_op = M.assign(updateM_placeholder)
wr = tf.get_variable('wr', shape=[args.batch_size, args.read_head_num, args.memory_size])
updatewr_placeholder = tf.placeholder(wr.dtype, shape=wr.get_shape())
updatewr_op = wr.assign(updatewr_placeholder)
wu = tf.get_variable('wu', shape=[args.batch_size, args.memory_size])
updatewu_placeholder = tf.placeholder(wu.dtype, shape=wu.get_shape())
updatewu_op = wu.assign(updatewu_placeholder)
# except Exception:
# pass
test_fscore = termm = acc1 = acc2 = acc3 = auorc= 0
for b in range(args.num_epoches):
if b==0:
last_y=None
st = time.time()
# get data
is_train = True
if b > args.num_train_epoches:
is_train = False
data_loader.is_training = False
x_image, x_label, y = data_loader.fetch_batch(args, is_training=True, last_y = last_y)
if args.reuse:
last_y = np.reshape(y[:, -1, :], [y.shape[0], 1, y.shape[2]])
# Save model
if b % 30001 == 0 and b > 0:
# print(best_acc1)
# print(best_acc5)
# print(best_acc10)
save_m = args.save_dir + '/' + args.model + '/'
if not os.path.isdir(save_m):
os.mkdir(save_m)
print('save model...')
saver.save(sess, save_m + args.w_strategy + '/model.tfmodel', global_step=b)
test_fscore, rmm, acc1, acc2, acc3 = test_full(args, data_loader, model, sess,
data_loader.test_data_indexes)
print(rmm, acc1, acc2, acc3)
                raise SystemExit('stopping after final checkpoint evaluation')
# Train
feed_dict = {model.x_image: x_image, model.x_label: x_label, model.y: y}
if is_train:
sess.run(model.train_op, feed_dict=feed_dict)
output, learning_loss = sess.run([model.o, model.learning_loss], feed_dict=feed_dict)
if args.label_type == 'five_hot':
output = utils.batch_five_hot_prob_to_one_hot(output, data_loader.output_size)
y = utils.batch_five_hot_prob_to_one_hot(y, data_loader.output_size)
state_list = sess.run(model.state_list, feed_dict=feed_dict)
# print(state_list[0]['M'])
# print('------')
# print(state_list[1]['M'])
# print('------')
# print(output)
if args.reuse and args.model == 'MANN2':
sess.run(updateM_op, {updateM_placeholder: state_list[-1]['M']})
sess.run(updatewr_op, {updatewr_placeholder: state_list[-1]['wr']})
sess.run(updatewu_op, {updatewu_placeholder: state_list[-1]['wu']})
if b % 50 == 0 and b>0:
# test_fscore,termm, acc1, acc2, acc3 = test_full(args, data_loader, model, sess,
# data_loader.test_data_indexes)
acc1, test_fscore, auorc = test_full_online(args, data_loader, model, sess)
print('cur acc {} fscore {} auorc {}'.format(acc1, test_fscore, auorc))
# print(test_fscore, te_acc1, te_acc5, te_acc10)
# if test_fscore >= best_fscore:
# best_fscore = test_fscore
#
# save_m = args.save_dir + '/' + args.model + '/'
# if not os.path.isdir(save_m):
# os.mkdir(save_m)
# print('save model...')
# saver.save(sess, save_m + args.w_strategy + '/model.tfmodel', global_step=b)
#
# _, te2_acc1, te2_acc5, te2_acc10 = test_full(data_loader, model, sess,
# data_loader.test_data_indexes[
# len(data_loader.test_data_indexes) // 2:])
# best_acc1 = te2_acc1
# best_acc5 = te2_acc5
# best_acc10 = te2_acc10
# if b % 100 == 0:
# train_fscore, trrmm = test_full(data_loader, model, sess, data_loader.train_data_indexes)
# print('------')
learning_loss = learning_loss/args.seq_length
acc = test_f(args, output, y)
summary = tf.Summary()
summary.value.add(tag='train_loss', simple_value=learning_loss)
summary.value.add(tag='train_acc', simple_value=acc)
# summary.value.add(tag='train_fscore', simple_value=train_fscore)
summary.value.add(tag='test_fscore', simple_value=test_fscore)
# summary.value.add(tag='rmm', simple_value=termm)
summary.value.add(tag='test_acc', simple_value=acc1)
summary.value.add(tag='test_auorc', simple_value=auorc)
# summary.value.add(tag='tr_acc10', simple_value=acc3)
# summary.value.add(tag='te_acc1', simple_value=te_acc1)
# summary.value.add(tag='te_acc5', simple_value=te_acc5)
# summary.value.add(tag='te_acc10', simple_value=te_acc10)
if is_train:
train_writer.add_summary(summary, b)
train_writer.flush()
else:
test_writer.add_summary(summary, b)
test_writer.flush()
print('episode: {} with cur loss: {} cur acc: {} in {} s'.format(b, learning_loss, acc, time.time()-st))
def test_full(args, data_loader, model, sess, list_index):
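    """Evaluate the model over ``list_index`` and return the averaged F-score,
    MRR and precision@{1, 2, 10} of the first-step predictions."""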
all_output = []
all_y = []
ii = 0
for _ in range(len(list_index) // data_loader.batch_size):
if ii + data_loader.batch_size < len(list_index):
inds = list(range(ii, ii + data_loader.batch_size))
else:
inds = list(range(len(list_index) - data_loader.batch_size - 1,
len(list_index)))
ii += data_loader.batch_size
# print(list_index)
x_image, x_label, y, yr = data_loader.predict_index2data(args, list_index, inds)
feed_dict = {model.x_image: x_image, model.x_label: x_label, model.y: y}
output= sess.run(model.o, feed_dict=feed_dict)
all_output.append(output[:, 0, :])
if args.multi_label:
all_y.append(yr[:, 0, :])
else:
all_y.append(y[:, 0, :])
if args.label_type == 'five_hot':
all_output = utils.batch_five_hot_prob_to_one_hot(np.asarray(all_output), data_loader.output_size)
if args.label_type == 'five_hot' and not args.multi_label:
all_y = utils.batch_five_hot_prob_to_one_hot(np.asarray(all_y), data_loader.output_size)
y_t_p = np.asarray(np.reshape(np.asarray(all_output), (-1, data_loader.output_size)), dtype=np.float32)
y_prob=y_t_p
# y_t_p = np.argmax(y_t_p, axis=-1)
y_t_r = np.asarray(np.reshape(np.asarray(all_y), (-1, data_loader.output_size)), dtype=np.float32)
# y_t_r = np.argmax(y_t_r, axis=-1)
# print(y_t_r)
# print('=====')
# print(y_t_p)
print(y_t_p.shape)
print(y_t_r.shape)
if not args.multi_label:
rmm = mimic_prepare.mrr(y_prob, range(data_loader.output_size), np.argmax(y_t_r, axis=-1))
else:
rmm = mimic_prepare.mrr(y_prob, range(data_loader.output_size),y_t_r)
# print(metrics.classification_report(y_t_r, y_t_p))
acc1 = mimic_prepare.precision_at_k(y_prob, range(data_loader.output_size),y_t_r,1)
acc2 = mimic_prepare.precision_at_k(y_prob, range(data_loader.output_size), y_t_r, 2)
acc3 = mimic_prepare.precision_at_k(y_prob, range(data_loader.output_size), y_t_r, 10)
precision, recall, fscore, support = score(np.argmax(y_t_r,axis=-1), np.argmax(y_t_p,axis=-1))
print("score", np.average(precision), np.average(recall), np.average(fscore))
return np.average(fscore), rmm, acc1, acc2, acc3
def test_f(args, y, output):
# print('{} vs {}'.format(one_hot_decode(y), one_hot_decode(output)))
# print('{} vs {}'.format(y.shape, output.shape))
return np.mean(np.asarray(np.equal(np.argmax(y, axis=-1),np.argmax(output, axis=-1)),dtype=np.float32))
def test_full_online(args, data_loader, model, sess):
    if args.reuse and args.model == 'MANN2':  # make it persistent
# try:
with tf.variable_scope("init", reuse=True):
M = tf.get_variable('M', shape=[args.batch_size, args.memory_size, args.memory_vector_dim])
updateM_placeholder = tf.placeholder(M.dtype, shape=M.get_shape())
updateM_op = M.assign(updateM_placeholder)
wr = tf.get_variable('wr', shape=[args.batch_size, args.read_head_num, args.memory_size])
updatewr_placeholder = tf.placeholder(wr.dtype, shape=wr.get_shape())
updatewr_op = wr.assign(updatewr_placeholder)
wu = tf.get_variable('wu', shape=[args.batch_size, args.memory_size])
updatewu_placeholder = tf.placeholder(wu.dtype, shape=wu.get_shape())
updatewu_op = wu.assign(updatewu_placeholder)
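    # Snapshot the current external memory state (M, read weights, usage
    # weights) so the validation episodes below do not disturb the training
    # memory; the snapshot is written back after evaluation.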
cur_memory = sess.run([M, wr, wu])
acc=[]
fscores=[]
auorc = []
for _ in range(args.validation_length):
num_t = len(data_loader.test_data_indexes)
num_loop = num_t//args.seq_length+1
# print('valid {} num loop {}'.format(_,num_loop))
list_index = list(range(num_t))
random.shuffle(list_index)
for l in range(num_loop):
if l ==0:
last_y=None
fr = l*args.seq_length
to = (l+1)*args.seq_length
if fr >=num_t:
break
if to >=num_t:
to=num_t
rnum=to-fr+1
x_image, x_label, y = data_loader.fetch_batch(args, is_training=False,
last_y = last_y,
fix_list_index=data_loader.test_data_indexes[fr:to])
# print(list_index[fr:to])
# print(np.argmax(y,axis=-1)[0,:4])
last_y = np.reshape(y[:,-1,:],[y.shape[0],1,y.shape[2]])
feed_dict = {model.x_image: x_image, model.x_label: x_label, model.y: y}
output, learning_loss = sess.run([model.o, model.learning_loss], feed_dict=feed_dict)
if args.label_type == 'five_hot':
output = utils.batch_five_hot_prob_to_one_hot(output, data_loader.output_size)
y = utils.batch_five_hot_prob_to_one_hot(y, data_loader.output_size)
y = y[:,:rnum,:]
output = output[:, :rnum, :]
acc.append(test_f(args, y, output))
precision, recall, fscore, support = score(np.reshape(np.asarray(np.argmax(y,axis=-1)), [-1]),
np.reshape(np.argmax(output, axis=-1), [-1]))
fscores.append(np.mean(fscore))
auorc.append(metrics.roc_auc_score(np.reshape(np.asarray(y), [-1]),
np.reshape(output, [-1]),
average='macro', sample_weight=None))
state_list = sess.run(model.state_list, feed_dict=feed_dict)
if args.reuse and args.model == 'MANN2':
sess.run(updateM_op, {updateM_placeholder: state_list[-1]['M']})
sess.run(updatewr_op, {updatewr_placeholder: state_list[-1]['wr']})
sess.run(updatewu_op, {updatewu_placeholder: state_list[-1]['wu']})
if args.reuse and args.model == 'MANN2':
sess.run(updateM_op, {updateM_placeholder: cur_memory[0]})
sess.run(updatewr_op, {updatewr_placeholder: cur_memory[1]})
sess.run(updatewu_op, {updatewu_placeholder: cur_memory[2]})
return np.mean(acc), np.mean(fscores), np.mean(auorc)
if __name__ == '__main__':
main() | mit |
rodorad/spark-tk | regression-tests/sparktkregtests/testcases/frames/frame_group_by_test.py | 13 | 10492 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Test functionality of group_by, including aggregation_arguments """
import unittest
import pandas as pd
import numpy as np
import math
from sparktkregtests.lib import sparktk_test
class GroupByTest(sparktk_test.SparkTKTestCase):
    # Aggregates and names for non-numeric (string) columns
    # (some aggregates are not defined on strings)
# atk aggregates, then numpy aggregates
pd_cols_str = ['size', '<lambda>', 'max', 'min']
numpy_aggs_str = ['size',
lambda x: pd.Series.nunique(x, False),
'max',
'min']
atk_cols_str = ['_COUNT', '_COUNT_DISTINCT', '_MAX', '_MIN']
pd_cols = ['mean', 'size', '<lambda>', 'max',
'min', 'std', 'nansum', 'var']
numpy_aggs = ['mean',
'size',
lambda x: pd.Series.nunique(x, False),
'max',
'min',
'std',
np.nansum,
'var']
atk_cols = ['_AVG', '_COUNT', '_COUNT_DISTINCT', '_MAX',
'_MIN', '_STDEV', '_SUM', '_VAR']
def setUp(self):
"""Build test frame"""
super(GroupByTest, self).setUp()
# Aggregates to test on strings
self.aggs_str = [self.context.agg.count,
self.context.agg.count_distinct,
self.context.agg.max,
self.context.agg.min]
# Aggregates for numeric columns
self.aggs = [self.context.agg.avg,
self.context.agg.count,
self.context.agg.count_distinct,
self.context.agg.max,
self.context.agg.min,
self.context.agg.stdev,
self.context.agg.sum,
self.context.agg.var]
schema_colors = [("Int32_0_15", int),
("Int32_0_31", int),
("colors", str),
("Int64_0_15", int),
("Int64_0_31", int),
("Float32_0_15", float),
("Float32_0_31", float),
("Float64_0_15", float),
("Float64_0_31", float)]
dataset = self.get_file("colors_32_9cols_128rows.csv")
self.frame = self.context.frame.import_csv(
dataset, schema=schema_colors)
def test_stats_on_string_avg(self):
"""Non-numeric aggregates error on non-numeric column"""
with self.assertRaises(Exception):
self.frame.group_by('colors', {'colors': self.context.agg.avg})
def test_stats_on_string_stdev(self):
"""Non-numeric aggregates error on non-numeric column"""
with self.assertRaises(Exception):
self.frame.group_by('colors', {'colors': self.context.agg.stdev})
def test_stats_on_string_sum(self):
"""Non-numeric aggregates error on non-numeric column"""
with self.assertRaises(Exception):
self.frame.group_by('colors', {'colors': self.context.agg.sum})
def test_stats_on_string_var(self):
"""Non-numeric aggregates error on non-numeric column"""
with self.assertRaises(Exception):
self.frame.group_by('colors', {'colors': self.context.agg.var})
def test_invalid_column_name(self):
"""Aggregate on non-existant column errors"""
with self.assertRaises(Exception):
self.frame.group_by(
'InvalidColumnName', {'colors': self.context.agg.var})
def test_group_int32_standard(self):
"""Test groupby on 1 column, int32"""
stats = self.frame.group_by(['Int32_0_15'], {'Int32_0_31': self.aggs})
self._validate(stats, 'Int32_0_31', ['Int32_0_15'])
def test_group_float32_standard(self):
"""Test groupby on 1 column, float32"""
stats = self.frame.group_by(
['Float32_0_15'], {'Float32_0_31': self.aggs})
self._validate(stats, 'Float32_0_31', ['Float32_0_15'])
def test_group_float64_standard(self):
"""Test groupby on 1 column, float64"""
stats = self.frame.group_by(
['Float64_0_15'], {'Float64_0_31': self.aggs})
self._validate(stats, 'Float64_0_31', ['Float64_0_15'])
def test_group_int64_standard(self):
"""Test groupby on 1 column, int64"""
stats = self.frame.group_by(['Int64_0_15'], {'Int64_0_31': self.aggs})
self._validate(stats, 'Int64_0_31', ['Int64_0_15'])
    def test_group_by_str_standard(self):
"""Test groupby on 1 column, string"""
stats = self.frame.group_by(['colors'], {'Int32_0_31': self.aggs})
self._validate_str(stats, 'Int32_0_31', ['colors'])
def test_group_by_str_agg_str(self):
"""Test groupby on 1 column, string, aggregate is string"""
stats = self.frame.group_by(['colors'], {'colors': self.aggs_str})
self._validate_str(stats, 'colors', ['colors'])
def test_group_int32_multiple_cols(self):
"""Test groupby on multiple columns, int32"""
stats = self.frame.group_by(
['Int32_0_15', 'Int32_0_31'], {'Int32_0_31': self.aggs})
self._validate(stats, 'Int32_0_31', ['Int32_0_15', 'Int32_0_31'])
def test_group_float32_multiple_cols(self):
"""Test groupby on multiple columns, float32"""
stats = self.frame.group_by(
['Float32_0_15', 'Float32_0_31'], {'Float32_0_31': self.aggs})
self._validate(stats, 'Float32_0_31', ['Float32_0_15', 'Float32_0_31'])
def test_group_float64_multiple_cols(self):
"""Test groupby on multiple columns, float64"""
stats = self.frame.group_by(
['Float64_0_15', 'Float64_0_31'], {'Float32_0_31': self.aggs})
self._validate(stats, 'Float32_0_31', ['Float64_0_15', 'Float64_0_31'])
def test_group_int64_multiple_cols(self):
"""Test groupby on multiple columns, int64"""
stats = self.frame.group_by(
['Int64_0_15', 'Int64_0_31'], {'Int64_0_31': self.aggs})
self._validate(stats, 'Int64_0_31', ['Int64_0_15', 'Int64_0_31'])
def test_groupby_str_multiple_cols(self):
"""Test groupby on multiple columns, string"""
stats = self.frame.group_by(
['colors', 'Int32_0_15'], {'colors': self.aggs_str})
self._validate_str(stats, 'colors', ['colors', 'Int32_0_15'])
def test_group_int32_none(self):
"""Test groupby none, int32 aggregate"""
stats = self.frame.group_by(None, {'Int32_0_31': self.aggs})
self._validate_single_group(stats, None, 'Int32_0_31')
def test_group_float32_none(self):
"""Test groupby none, float32 aggregate"""
stats = self.frame.group_by(None, {'Float32_0_31': self.aggs})
self._validate_single_group(stats, None, 'Float32_0_31')
def test_group_float64_none(self):
"""Test groupby none, float64 aggregate"""
stats = self.frame.group_by(None, {'Float64_0_31': self.aggs})
self._validate_single_group(stats, None, 'Float64_0_31')
def test_group_int64_none(self):
"""Test groupby none, int64 aggregate"""
stats = self.frame.group_by(None, {'Int64_0_31': self.aggs})
self._validate_single_group(stats, None, 'Int64_0_31')
def _validate_single_group(self, stats, groupby_cols, aggregator):
# Validate the result of atk groupby and pandas groupby are the same
        # when there is a single group (none)
pd_stats = stats.to_pandas(stats.count())
new_frame = self.frame.to_pandas(self.frame.count())
gb = new_frame.groupby(lambda x: 0)[aggregator].agg(self.numpy_aggs)
int_cols = map(lambda x: aggregator+x, self.atk_cols)
for k, l in zip(int_cols, self.pd_cols):
self.assertAlmostEqual(gb.loc[0][l], pd_stats.loc[0][k], places=4)
def _validate(self, stats, aggregator, groupby_cols):
# Validate atk and pandas groupby are the same,
        # Cast the index to integer, and use all aggregates, as the column
        # being aggregated is numeric
self._validate_helper(
stats, aggregator, groupby_cols, self.numpy_aggs,
self.pd_cols, self.atk_cols, int)
def _validate_str(self, stats, aggregator, groupby_cols):
# Validate atk and pandas groupby are the same,
        # Cast the index to the same value, and use string aggregates, as the
        # column being aggregated is a string
self._validate_helper(
stats, aggregator, groupby_cols, self.numpy_aggs_str,
self.pd_cols_str, self.atk_cols_str, lambda x: x)
def _validate_helper(self, stats, aggregator, groupby_cols,
aggs, pd_cols, atk_cols, mapper):
# Get and compare results of atk and pandas, cast as appropriate
pd_stats = stats.to_pandas(stats.count())
new_frame = self.frame.to_pandas(self.frame.count())
gb = new_frame.groupby(groupby_cols)[aggregator].agg(aggs)
int_cols = map(lambda x: aggregator+x, atk_cols)
for _, i in pd_stats.iterrows():
for k, l in zip(int_cols, pd_cols):
if ((type(i[k]) is np.float64 or type(i[k]) is float) and
math.isnan(i[k])):
self.assertTrue(
math.isnan(
gb.loc[tuple(
map(lambda x: mapper(i[x]),
groupby_cols))][l]))
else:
self.assertAlmostEqual(
gb.loc[tuple(
map(lambda x: mapper(i[x]), groupby_cols))][l],
i[k], places=4)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
patricksnape/menpo | menpo/image/test/test_rasterize.py | 2 | 1961 | import numpy as np
from numpy.testing import assert_allclose
from menpo.image import Image
from menpo.image.rasterize import rasterize_landmarks_2d
from menpo.shape import PointCloud, PointUndirectedGraph
centre = PointCloud([[4.5, 4.5]])
line = PointUndirectedGraph(
np.array([[2, 4.5], [8, 4.5]]), adjacency_matrix=np.array([[0, 1], [1, 0]])
)
def test_rasterize_matplotlib_basic():
im = Image.init_blank([11, 11], fill=0, n_channels=1)
im.landmarks["test"] = centre
new_im = rasterize_landmarks_2d(
im,
group="test",
render_lines=False,
marker_style=".",
marker_face_colour="r",
marker_size=2,
marker_edge_width=0,
backend="matplotlib",
)
assert new_im.n_channels == 3
assert new_im.shape == (11, 11)
assert_allclose(new_im.pixels[:, 5, 5], [255, 0, 0])
def test_rasterize_pillow_basic():
im = Image.init_blank([11, 11], fill=0, n_channels=3)
im.landmarks["test"] = centre
new_im = rasterize_landmarks_2d(
im,
group="test",
render_lines=False,
marker_style="s",
marker_face_colour="r",
marker_size=1,
marker_edge_width=0,
backend="pillow",
)
assert new_im.n_channels == 3
assert new_im.shape == (11, 11)
assert_allclose(new_im.pixels[0, 3:6, 3:6], 255)
def test_rasterize_pillow_basic_line():
im = Image.init_blank([11, 11], fill=0, n_channels=3)
im.landmarks["test"] = line
new_im = rasterize_landmarks_2d(
im,
group="test",
render_lines=True,
line_width=1,
line_colour="b",
marker_style="s",
marker_face_colour="r",
marker_size=1,
marker_edge_width=0,
backend="pillow",
)
assert new_im.n_channels == 3
assert_allclose(new_im.pixels[0, 1:4, 3:6], 255)
assert_allclose(new_im.pixels[0, 7:-1, 3:6], 255)
assert_allclose(new_im.pixels[2, 4:7, 4], 255)
| bsd-3-clause |
freeman-lab/dask | dask/dataframe/shuffle.py | 1 | 6686 | from itertools import count
from collections import Iterator
from math import ceil
from toolz import merge, accumulate, merge_sorted
import toolz
from operator import getitem, setitem
import pandas as pd
import numpy as np
from .. import threaded
from ..optimize import cull
from .core import DataFrame, Series, get, _Frame, tokens
from ..compatibility import unicode
from ..utils import ignoring
from .utils import (strip_categories, unique, shard_df_on_index, _categorize,
get_categories)
def set_index(df, index, npartitions=None, compute=True, **kwargs):
""" Set DataFrame index to new column
    Sorts the index and realigns the DataFrame to the new sorted order. This
    shuffles and repartitions your data.
"""
npartitions = npartitions or df.npartitions
if not isinstance(index, Series):
index2 = df[index]
else:
index2 = index
divisions = (index2
.quantiles(np.linspace(0, 100, npartitions+1))
.compute())
return df.set_partition(index, divisions, compute=compute, **kwargs)
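# Minimal usage sketch (hypothetical column name), assuming ``df`` is a dask
# DataFrame:
#
#     df2 = set_index(df, 'timestamp', npartitions=8)
#
# Divisions for the new partitions are estimated from approximate quantiles of
# the index column, so each output partition covers a contiguous range of
# sorted index values.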
def set_partition(df, index, divisions, compute=False, **kwargs):
""" Group DataFrame by index
Sets a new index and partitions data along that index according to
divisions. Divisions are often found by computing approximate quantiles.
The function ``set_index`` will do both of these steps.
Parameters
----------
df: DataFrame/Series
Data that we want to re-partition
index: string or Series
Column to become the new index
divisions: list
Values to form new divisions between partitions
See Also
--------
set_index
shuffle
partd
"""
if isinstance(index, _Frame):
assert df.divisions == index.divisions
import partd
p = ('zpartd' + next(tokens),)
# Get Categories
catname = 'set-partition--get-categories' + next(tokens)
dsk1 = {catname: (get_categories, df._keys()[0]),
p: (partd.PandasBlocks, (partd.Buffer, (partd.Dict,), (partd.File,)))}
# Partition data on disk
name = 'set-partition--partition' + next(tokens)
if isinstance(index, _Frame):
dsk2 = dict(((name, i),
(_set_partition, part, ind, divisions, p))
for i, (part, ind)
in enumerate(zip(df._keys(), index._keys())))
else:
dsk2 = dict(((name, i),
(_set_partition, part, index, divisions, p))
for i, part
in enumerate(df._keys()))
# Barrier
barrier_token = 'barrier' + next(tokens)
dsk3 = {barrier_token: (barrier, list(dsk2))}
if compute:
dsk = merge(df.dask, dsk1, dsk2, dsk3)
if isinstance(index, _Frame):
dsk.update(index.dask)
p, barrier_token = get(dsk, [p, barrier_token], **kwargs)
# Collect groups
name = 'set-partition--collect' + next(tokens)
dsk4 = dict(((name, i),
(_categorize, catname, (_set_collect, i, p, barrier_token)))
for i in range(len(divisions) - 1))
dsk = merge(df.dask, dsk1, dsk2, dsk3, dsk4)
if isinstance(index, _Frame):
dsk.update(index.dask)
if compute:
dsk = cull(dsk, list(dsk4.keys()))
return DataFrame(dsk, name, df.columns, divisions)
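# Hedged usage sketch (added; the divisions below are made-up values):
#
#     divisions = [0, 10, 20, 30]        # e.g. approximate quantiles of df.x
#     df2 = set_partition(df, 'x', divisions)
#
# Output partition i then holds the rows whose new index value falls between
# divisions[i] and divisions[i + 1].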
def barrier(args):
list(args)
return 0
def _set_partition(df, index, divisions, p):
""" Shard partition and dump into partd """
df = strip_categories(df)
divisions = list(divisions)
df = df.set_index(index)
shards = shard_df_on_index(df, divisions[1:-1])
p.append(dict(enumerate(shards)))
def _set_collect(group, p, barrier_token):
""" Get new partition dataframe from partd """
try:
return p.get(group)
except ValueError:
return pd.DataFrame()
def shuffle(df, index, npartitions=None):
""" Group DataFrame by index
Hash grouping of elements. After this operation all elements that have
the same index will be in the same partition. Note that this requires
full dataset read, serialization and shuffle. This is expensive. If
possible you should avoid shuffles.
This does not preserve a meaningful index/partitioning scheme.
See Also
--------
set_index
set_partition
partd
"""
if isinstance(index, _Frame):
assert df.divisions == index.divisions
if npartitions is None:
npartitions = df.npartitions
import partd
p = ('zpartd' + next(tokens),)
dsk1 = {p: (partd.PandasBlocks, (partd.Buffer, (partd.Dict,),
(partd.File,)))}
# Partition data on disk
name = 'shuffle-partition' + next(tokens)
if isinstance(index, _Frame):
dsk2 = dict(((name, i),
(partition, part, ind, npartitions, p))
for i, (part, ind)
in enumerate(zip(df._keys(), index._keys())))
else:
dsk2 = dict(((name, i),
(partition, part, index, npartitions, p))
for i, part
in enumerate(df._keys()))
# Barrier
barrier_token = 'barrier' + next(tokens)
dsk3 = {barrier_token: (barrier, list(dsk2))}
# Collect groups
name = 'shuffle-collect' + next(tokens)
dsk4 = dict(((name, i),
(collect, i, p, barrier_token))
for i in range(npartitions))
divisions = [None] * (npartitions + 1)
dsk = merge(df.dask, dsk1, dsk2, dsk3, dsk4)
if isinstance(index, _Frame):
dsk.update(index.dask)
return DataFrame(dsk, name, df.columns, divisions)
def partition(df, index, npartitions, p):
""" Partition a dataframe along a grouper, store partitions to partd """
rng = pd.Series(np.arange(len(df)))
if isinstance(index, Iterator):
index = list(index)
if not isinstance(index, (pd.Index, pd.core.generic.NDFrame)):
index = df[index]
if isinstance(index, pd.Index):
groups = rng.groupby([abs(hash(x)) % npartitions for x in index])
if isinstance(index, pd.Series):
groups = rng.groupby(index.map(lambda x: abs(hash(x)) % npartitions).values)
elif isinstance(index, pd.DataFrame):
groups = rng.groupby(index.apply(
lambda row: abs(hash(tuple(row))) % npartitions,
axis=1).values)
d = dict((i, df.iloc[groups.groups[i]]) for i in range(npartitions)
if i in groups.groups)
p.append(d)
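# Hedged example (added): with npartitions=4, a row whose grouping value x
# satisfies abs(hash(x)) % 4 == 2 is appended to shard 2 of the partd store,
# so rows with equal keys always land in the same output partition.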
def collect(group, p, barrier_token):
""" Collect partitions from partd, yield dataframes """
return p.get(group)
| bsd-3-clause |
zhenxu66/scipy2015-blaze-bokeh | app/viz2.py | 12 | 4846 | # -*- coding: utf-8 -*-
import math
from collections import OrderedDict
import numpy as np
import pandas as pd
import netCDF4
from bokeh.plotting import figure, show, output_notebook
from bokeh.models import DatetimeTickFormatter, ColumnDataSource, HoverTool, Plot, Range1d
from bokeh.palettes import RdBu11
from bokeh.models.glyphs import Text, Rect
import utils.world_countries as wc
from utils.colormap import RGBAColorMapper
colormap = RGBAColorMapper(-6, 6, RdBu11)
def get_slice(t, year, month):
i = (year - 1850)*12 + month - 1
return colormap.color(t[i, :, :])
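# Hedged note (added): the temperature cube is assumed to start at January
# 1850, so the time index is a plain month offset; e.g. get_slice(t, 1950, 1)
# reads frame (1950 - 1850) * 12 + 1 - 1 = 1200.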
def climate_map():
data = netCDF4.Dataset('data/Land_and_Ocean_LatLong1.nc')
t = data.variables['temperature']
image = get_slice(t, 1950, 1)
world_countries = wc.data.copy()
worldmap = pd.DataFrame.from_dict(world_countries, orient='index')
# Create your plot
p = figure(width=900, height=500, x_axis_type=None, y_axis_type=None,
x_range=[-180,180], y_range=[-90,90], toolbar_location="left")
p.image_rgba(
image=[image],
x=[-180], y=[-90],
dw=[360], dh=[180], name='image'
)
p.patches(xs=worldmap['lons'], ys=worldmap['lats'], fill_color="white", fill_alpha=0,
line_color="black", line_width=0.5)
return p
def legend():
# Set ranges
xdr = Range1d(0, 100)
ydr = Range1d(0, 500)
# Create plot
plot = Plot(
x_range=xdr,
y_range=ydr,
title="",
plot_width=100,
plot_height=500,
min_border=0,
toolbar_location=None,
outline_line_color="#FFFFFF",
)
# For each color in your palette, add a Rect glyph to the plot with the appropriate properties
palette = RdBu11
width = 40
for i, color in enumerate(palette):
rect = Rect(
x=40, y=(width * (i + 1)),
width=width, height=40,
fill_color=color, line_color='black'
)
plot.add_glyph(rect)
# Add text labels and add them to the plot
minimum = Text(x=50, y=0, text=['-6 ºC'])
plot.add_glyph(minimum)
maximum = Text(x=50, y=460, text=['6 ºC'])
plot.add_glyph(maximum)
return plot
def timeseries():
# Get data
df = pd.read_csv('data/Land_Ocean_Monthly_Anomaly_Average.csv')
df['datetime'] = pd.to_datetime(df['datetime'])
df = df[['anomaly','datetime']]
df['moving_average'] = pd.rolling_mean(df['anomaly'], 12)
df = df.fillna(0)
# List all the tools that you want in your plot separated by comas, all in one string.
TOOLS="crosshair,pan,wheel_zoom,box_zoom,reset,hover,previewsave"
# New figure
t = figure(x_axis_type = "datetime", width=1000, height=200,tools=TOOLS)
# Data processing
    # The hover tool doesn't render datetimes appropriately. We'll need a string.
    # We just want dates, so drop the time component.
f = lambda x: str(x)[:7]
df["datetime_s"]=df[["datetime"]].applymap(f)
source = ColumnDataSource(df)
# Create plot
t.line('datetime', 'anomaly', color='lightgrey', legend='anom', source=source)
t.line('datetime', 'moving_average', color='red', legend='avg', source=source, name="mva")
# Style
xformatter = DatetimeTickFormatter(formats=dict(months=["%b %Y"], years=["%Y"]))
t.xaxis[0].formatter = xformatter
t.xaxis.major_label_orientation = math.pi/4
t.yaxis.axis_label = 'Anomaly(ºC)'
t.legend.orientation = "bottom_right"
t.grid.grid_line_alpha=0.2
t.toolbar_location=None
# Style hover tool
hover = t.select(dict(type=HoverTool))
hover.tooltips = """
<div>
<span style="font-size: 15px;">Anomaly</span>
<span style="font-size: 17px; color: red;">@anomaly</span>
</div>
<div>
<span style="font-size: 15px;">Month</span>
<span style="font-size: 10px; color: grey;">@datetime_s</span>
</div>
"""
hover.renderers = t.select("mva")
# Show plot
#show(t)
return t
# Add title
def title():
# Data
year = 1850
month = 1
years = [str(x) for x in np.arange(1850, 2015, 1)]
months = [str(x) for x in np.arange(1, 13, 1)]
months_str = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
month_str = months_str[month-1]
title = figure(width=1200, height=100, x_range=(0, 1200), y_range=(0, 100), toolbar_location=None,
x_axis_type=None, y_axis_type=None, outline_line_color="#FFFFFF", tools="", min_border=0)
title.text(x=500, y=5, text=[month_str], text_font_size='36pt', text_color='black',
name="month", text_font="Georgia")
title.text(x=350, y=5, text=[str(year)], text_font_size='36pt', text_color='black',
name="year",text_font="Georgia")
return title
| mit |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/feature_selection/plot_permutation_test_for_classification.py | 1 | 2270 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significant, one technique
is to repeat the classification procedure after randomly permuting the
labels. The p-value is then given by the percentage of runs for
which the score obtained is greater than the classification score
obtained in the first place.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
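# Hedged note (added): with n_permutations=100, if e.g. 3 permuted-label runs
# score at least as well as the original fit, permutation_test_score reports
# a p-value of (3 + 1) / (100 + 1) ~= 0.04; adding 1 to the numerator and
# denominator keeps the p-value strictly above zero.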
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import permutation_test_score
from sklearn.svm import SVC
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data not correlated
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features to make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
###############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
# plt.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
# plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
| mit |
avmarchenko/exatomic | exatomic/core/tensor.py | 3 | 4436 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
from exa import DataFrame
import numpy as np
import pandas as pd
class Tensor(DataFrame):
"""
The tensor dataframe.
+---------------+----------+-----------------------------------------+
| Column | Type | Description |
+===============+==========+=========================================+
| xx | float | 0,0 position in tensor |
+---------------+----------+-----------------------------------------+
| xy | float | 0,1 position in tensor |
+---------------+----------+-----------------------------------------+
| xz | float | 0,2 position in tensor |
+---------------+----------+-----------------------------------------+
| yx | float | 1,0 position in tensor |
+---------------+----------+-----------------------------------------+
| yy | float | 1,1 position in tensor |
+---------------+----------+-----------------------------------------+
| yz | float | 1,2 position in tensor |
+---------------+----------+-----------------------------------------+
    | zx            | float    | 2,0 position in tensor                  |
    +---------------+----------+-----------------------------------------+
    | zy            | float    | 2,1 position in tensor                  |
    +---------------+----------+-----------------------------------------+
    | zz            | float    | 2,2 position in tensor                  |
    +---------------+----------+-----------------------------------------+
    | frame         | category | frame value the tensor is attached to   |
+---------------+----------+-----------------------------------------+
| atom | int | atom index of molecule to place tensor |
+---------------+----------+-----------------------------------------+
| label | category | label of the type of tensor |
+---------------+----------+-----------------------------------------+
"""
_index = 'tensor'
_columns = ['xx','xy','xz','yx','yy','yz','zx','zy','zz',
'frame','atom','label']
_categories = {'frame': np.int64, 'label': str}
#@property
#def _constructor(self):
# return Tensor
@classmethod
def from_file(cls, filename):
"""
        A file reader that takes a tensor file and extracts all
        necessary information. The expected file format is as
        follows.
frame label atom
xx xy xz
yx yy yz
zx zy zz
        For multiple tensors, repeat the same four-line block back to back
        with no blank lines in between; a blank metadata line leaves the
        frame, label and atom attributes empty.
Args:
filename (str): file pathname
Returns:
tens (:class:`~exatomic.tensor.Tensor`): Tensor table with the tensor attributes
"""
df = pd.read_csv(filename, delim_whitespace=True, header=None,
skip_blank_lines=False)
meta = df[::4]
idxs = meta.index.values
n = len(idxs)
df = df[~df.index.isin(idxs)]
df[1] = df[1].astype(np.float64)
df['grp'] = [i for i in range(n) for j in range(3)]
df = pd.DataFrame(df.groupby('grp').apply(lambda x:
x.unstack().values[:-3]).values.tolist(),
columns=['xx','xy','xz','yx','yy','yz','zx','zy','zz'])
# scale = []
# for i in df.index.values:
# scale.append(5./abs(df.loc[i,:]).max().astype(np.float64))
meta.reset_index(drop=True, inplace=True)
meta.rename(columns={0: 'frame', 1: 'label', 2: 'atom'}, inplace=True)
df = pd.concat([meta, df], axis=1)
df['atom'] = df['atom'].astype(np.int64)
df['frame'] = df['frame'].astype(np.int64)
# df['scale'] = scale
# print(df)
return cls(df)
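# Hedged example (added): a minimal two-tensor input file for
# Tensor.from_file would look like
#
#     0 nmr 3
#     31.4   0.0    0.0
#      0.0  31.4    0.0
#      0.0   0.0   41.1
#     1 nmr 4
#     ...
#
# i.e. one metadata line (frame, label, atom) followed by the three rows of
# the 3x3 tensor, repeated per tensor with no separating blank lines; the
# numbers above are made up.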
def add_tensor(uni, fp):
"""
Simple function to add a tensor object to the universe.
Args:
uni (universe): Universe object
fp (str): file pathname
"""
uni.tensor = Tensor.from_file(fp)
| apache-2.0 |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/utils/__init__.py | 10 | 15503 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
from ..exceptions import DataConversionWarning
from .deprecation import deprecated
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric", "indices_to_mask", "deprecated"]
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
super(Bunch, self).__init__(kwargs)
def __setattr__(self, key, value):
self[key] = value
def __dir__(self):
return self.keys()
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __setstate__(self, state):
        # Bunch pickles generated with scikit-learn 0.16.* have a non-empty
        # __dict__. This causes a surprising behaviour when
# loading these pickles scikit-learn 0.17: reading bunch.key
# uses __dict__ but assigning to bunch.key use __setattr__ and
# only changes bunch['key']. More details can be found at:
# https://github.com/scikit-learn/scikit-learn/issues/6196.
# Overriding __setstate__ to be a noop has the effect of
# ignoring the pickled __dict__
pass
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask : array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.signedinteger):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
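# Hedged illustration (added, not part of the upstream docstring):
#
#     >>> from scipy.sparse import csr_matrix
#     >>> safe_mask(csr_matrix(np.eye(3)), np.array([True, False, True]))
#     array([0, 2])
#
# i.e. for sparse inputs a boolean mask is converted to integer indices,
# which sparse matrices can always be indexed with.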
def axis0_safe_slice(X, mask, len_mask):
"""
This mask is safer than safe_mask since it returns an
empty array, when a sparse matrix is sliced with a boolean mask
with all False, instead of raising an unhelpful error in older
versions of SciPy.
See: https://github.com/scipy/scipy/issues/5361
Also note that we can avoid doing the dot product by checking if
the len_mask is not zero in _huber_loss_and_gradient but this
is not going to be the bottleneck, since the number of outliers
and non_outliers are typically non-zero and it makes the code
tougher to follow.
"""
if len_mask != 0:
return X[safe_mask(X, mask), :]
return np.zeros(shape=(0, X.shape[1]))
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list, pandas.DataFrame, pandas.Series.
Data from which to sample rows or items.
indices : array-like of int
Indices according to which X will be subsampled.
Returns
-------
subset
Subset of X on first axis
Notes
-----
CSR, CSC, and LIL sparse matrices are supported. COO sparse matrices are
not supported.
"""
if hasattr(X, "iloc"):
# Work-around for indexing with read-only indices in pandas
indices = indices if indices.flags.writeable else indices.copy()
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
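# Hedged illustration (added):
#
#     >>> safe_indexing(np.array([10, 20, 30, 40]), np.array([3, 0]))
#     array([40, 10])
#     >>> safe_indexing(['a', 'b', 'c'], [2, 1])
#     ['c', 'b']
#
# DataFrames are indexed through .iloc, arrays through take/fancy indexing,
# and plain lists through a comprehension, so all cases select by position.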
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
If replace is False it should not be larger than the length of
arrays.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
elif (max_n_samples > n_samples) and (not replace):
raise ValueError("Cannot sample %d out of arrays with dim %d "
"when replace is False" % (max_n_samples,
n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1"
% n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible.
Parameters
----------
x : iterable
"""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
def indices_to_mask(indices, mask_length):
"""Convert list of indices to boolean mask.
Parameters
----------
indices : list-like
List of integers treated as indices.
mask_length : int
Length of boolean mask to be generated.
Returns
-------
mask : 1d boolean nd-array
Boolean array that is True where indices are present, else False.
"""
if mask_length <= np.max(indices):
raise ValueError("mask_length must be greater than max(indices)")
mask = np.zeros(mask_length, dtype=np.bool)
mask[indices] = True
return mask
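# Hedged illustration (added): indices_to_mask([1, 3], 5) returns a boolean
# array equivalent to np.array([False, True, False, True, False]), while
# passing mask_length <= max(indices) raises ValueError.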
| mit |
kedz/cuttsum | trec2015/sbin/salience-regression-dev/salience-predictor.py | 1 | 7352 | import os
import cuttsum.events
import cuttsum.corpora
from cuttsum.pipeline import InputStreamResource
import numpy as np
import pandas as pd
def ds(X, y):
I = np.arange(y.shape[0])
I_1 = I[y[:,0] == 1]
I_0 = I[y[:,0] == 0]
size = I_1.shape[0]
np.random.shuffle(I_0)
I_0_ds = I_0[:size]
X_0 = X[I_0_ds]
X_1 = X[I_1]
y_ds = np.zeros((size * 2,), dtype="int32")
y_ds[size:2*size] = 1
X_ds = np.vstack([X_0, X_1])
I_ds = np.arange(size * 2)
np.random.shuffle(I_ds)
X_ds = X_ds[I_ds]
y_ds = y_ds[I_ds]
return X_ds, y_ds
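# Hedged note (added): ds() balances the classes by keeping every positive
# row plus an equally sized random sample of negative rows, e.g. 150
# positives in a 10,000-row matrix yield a shuffled 300-row training set.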
def get_input_stream(event, extractor="goose", thresh=.8, delay=None, topk=20):
corpus = cuttsum.corpora.get_raw_corpus(event)
res = InputStreamResource()
return res.get_dataframes(event, corpus, extractor, thresh, delay, topk)
dirname = "sp"
if not os.path.exists(dirname):
os.makedirs(dirname)
basic_cols = ["BASIC length", "BASIC char length",
"BASIC doc position", "BASIC all caps ratio",
"BASIC upper ratio",
#"BASIC lower ratio",
#"BASIC punc ratio",
"BASIC person ratio",
"BASIC location ratio",
"BASIC organization ratio", "BASIC date ratio",
"BASIC time ratio", "BASIC duration ratio",
"BASIC number ratio", "BASIC ordinal ratio",
"BASIC percent ratio", "BASIC money ratio",
"BASIC set ratio", "BASIC misc ratio"]
query_bw_cols = [
"Q_sent_query_cov",
"Q_sent_syn_cov",
"Q_sent_hyper_cov",
"Q_sent_hypo_cov",
]
query_fw_cols = [
"Q_query_sent_cov",
"Q_syn_sent_cov",
"Q_hyper_sent_cov",
"Q_hypo_sent_cov",
]
lm_cols = ["LM domain avg lp",
"LM gw avg lp"]
sum_cols = [
"SUM_sbasic_sum",
"SUM_sbasic_amean",
# "SUM_sbasic_max",
"SUM_novelty_gmean",
"SUM_novelty_amean",
# "SUM_novelty_max",
"SUM_centrality",
"SUM_pagerank",
]
stream_cols = [
"STREAM_sbasic_sum",
"STREAM_sbasic_amean",
"STREAM_sbasic_max",
"STREAM_per_prob_sum",
"STREAM_per_prob_max",
"STREAM_per_prob_amean",
"STREAM_loc_prob_sum",
"STREAM_loc_prob_max",
"STREAM_loc_prob_amean",
"STREAM_org_prob_sum",
"STREAM_org_prob_max",
"STREAM_org_prob_amean",
"STREAM_nt_prob_sum",
"STREAM_nt_prob_max",
"STREAM_nt_prob_amean",
]
cols = basic_cols + query_fw_cols + lm_cols + sum_cols + stream_cols
events = [e for e in cuttsum.events.get_events()
if e.query_num < 26 and e.query_num not in set([6, 7])]
events = events
y = []
X = []
istreams = []
for event in events:
print event
istream = get_input_stream(event)
istreams.append(istream)
y_e = []
X_e = []
for df in istream:
selector = (df["n conf"] == 1) & (df["nugget probs"].apply(len) == 0)
df.loc[selector, "nugget probs"] = df.loc[selector, "nuggets"].apply(lambda x: {n:1 for n in x})
df["probs"] = df["nugget probs"].apply(lambda x: [val for key, val in x.items()] +[0])
df["probs"] = df["probs"].apply(lambda x: np.max(x))
df.loc[(df["n conf"] == 1) & (df["nuggets"].apply(len) == 0), "probs"] = 0
y_t = df["probs"].values
y_t = y_t[:, np.newaxis]
y_e.append(y_t)
X_t = df[cols].values
X_e.append(X_t)
y_e = np.vstack(y_e)
y.append(y_e)
X_e = np.vstack(X_e)
X.append(X_e)
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import tree
from sklearn.metrics import classification_report, mean_squared_error
import matplotlib.pyplot as plt
num_events = len(events)
def hard_test(istream, cols):
X = []
y = []
for df in istream:
df_c = df[df["n conf"] == 1]
y_t = (df_c["nuggets"].apply(len) > 0).values.astype("int32")
y_t = y_t[:, np.newaxis]
y.append(y_t)
X_t = df_c[cols].values
X.append(X_t)
X = np.vstack(X)
y = np.vstack(y)
return X, y
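# Hedged note (added): hard_test() keeps only the fully confident
# annotations (n conf == 1) and binarises the target to "matches at least
# one nugget", giving a hard-labelled evaluation set alongside the soft
# probability targets built above.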
clfs = []
results = []
for i in xrange(num_events):
X_test, y_test = hard_test(istreams[i], cols)
X_test_s = X[i]
y_test_s = y[i]
X_train = np.vstack(X[0:i] + X[i+1:])
y_train = np.vstack(y[0:i] + y[i+1:])
# X_train, y_train = ds(X_train, y_train)
for max_depth in [3]: #, 2, 3, 4, 5]:
for lr in [1.,]: # 1.,]:
for est in [100, ]:
gbc = GradientBoostingRegressor(n_estimators=est, learning_rate=lr,
max_depth=max_depth, random_state=0)
gbc.fit(X_train, y_train.ravel())
y_pred = gbc.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
#prec, rec, fscore, sup = precision_recall_fscore_support(y_test, y_pred)
I = y_test.ravel().argsort()
item_index = np.where(y_test.ravel()[I] == 1)
print y_test.ravel()[I][:item_index[0][0]]
neg_mean = np.mean(y_pred.ravel()[I][:item_index[0][0]])
pos_mean = np.mean(y_pred.ravel()[I][item_index[0][0]:])
#print I
plt.plot(y_pred.ravel()[I], label="predicted")
#plt.plot(np.sort(y_test.ravel()), label="truth")
plt.plot(y_test.ravel()[I], label="truth")
plt.plot([0,item_index[0][0]], [neg_mean] * 2, "-.", label="neg mean")
                plt.plot([item_index[0][0],item_index[0][-1]], [pos_mean] * 2, "-.", label="pos mean")
#plt.legend()
plt.gcf().suptitle(events[i].fs_name())
plt.savefig(os.path.join(dirname, "{}_hard.png".format(events[i].fs_name())))
plt.close("all")
y_pred = gbc.predict(X_test_s)
mse = mean_squared_error(y_test_s, y_pred)
I = y_test_s.ravel().argsort()
#print I
#print y_test
plt.plot(y_pred.ravel()[I], label="predicted")
#plt.plot(np.sort(y_test.ravel()), label="truth")
plt.plot(y_test_s.ravel()[I], label="truth")
#plt.legend()
plt.gcf().suptitle(events[i].fs_name())
plt.savefig(os.path.join(dirname, "{}_soft.png".format(events[i].fs_name())))
plt.close("all")
plt.hist(y_test_s.ravel() - y_pred.ravel(), bins=25)
plt.gcf().suptitle(events[i].fs_name()+" residuals")
plt.savefig(os.path.join(dirname, "{}_res.png".format(events[i].fs_name())))
plt.close("all")
results.append(
{"name": "gbc lr_{} est_{} dep_{}".format(lr, est, max_depth),
"event": events[i].fs_name(),
"mse": mse,
#"pos prec": prec[1],
#"pos recall": rec[1],
#"pos fscore": fscore[1],
#"pos support": sup[1],
})
print results[-1]["name"]
df = pd.DataFrame(results)
#u_df = df[["name", "pos prec", "pos recall", "pos fscore"]].groupby("name").agg("mean")
u_df = df[["name", "mse"]].groupby("name").agg("mean")
print u_df
print
idx = u_df["mse"].argmax()
print u_df.loc[idx]
| apache-2.0 |
pratapvardhan/scikit-learn | examples/text/document_classification_20newsgroups.py | 37 | 10499 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='navy')
plt.barh(indices + .3, training_time, .2, label="training time",
color='c')
plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
has2k1/plotnine | plotnine/positions/position_stack.py | 1 | 2953 | from warnings import warn
import numpy as np
import pandas as pd
from ..exceptions import PlotnineWarning
from ..utils import remove_missing
from .position import position
class position_stack(position):
"""
Stack plotted objects on top of each other
The objects to stack are those that have
an overlapping x range.
"""
fill = False
def __init__(self, vjust=1, reverse=False):
self.params = {'vjust': vjust,
'reverse': reverse}
def setup_params(self, data):
"""
Verify, modify & return a copy of the params.
"""
# Variable for which to do the stacking
if 'ymax' in data:
if any((data['ymin'] != 0) & (data['ymax'] != 0)):
warn("Stacking not well defined when not "
"anchored on the axis.", PlotnineWarning)
var = 'ymax'
elif 'y' in data:
var = 'y'
else:
warn("Stacking requires either ymin & ymax or y "
"aesthetics. Maybe you want position = 'identity'?",
PlotnineWarning)
var = None
params = self.params.copy()
params['var'] = var
params['fill'] = self.fill
return params
def setup_data(self, data, params):
if not params['var']:
return data
if params['var'] == 'y':
data['ymax'] = data['y']
elif params['var'] == 'ymax':
bool_idx = data['ymax'] == 0
data.loc[bool_idx, 'ymax'] = data.loc[bool_idx, 'ymin']
data = remove_missing(
data,
vars=('x', 'xmin', 'xmax', 'y'),
name='position_stack')
return data
@classmethod
def compute_panel(cls, data, scales, params):
if not params['var']:
return data
negative = data['ymax'] < 0
neg = data.loc[negative]
pos = data.loc[~negative]
if len(neg):
neg = cls.collide(neg, params=params)
if len(pos):
pos = cls.collide(pos, params=params)
data = pd.concat([neg, pos], axis=0, ignore_index=True, sort=True)
return data
@staticmethod
def strategy(data, params):
"""
Stack overlapping intervals.
Assumes that each set has the same horizontal position
"""
vjust = params['vjust']
y = data['y'].copy()
y[np.isnan(y)] = 0
heights = np.append(0, y.cumsum())
if params['fill']:
heights = heights / np.abs(heights[-1])
data['ymin'] = np.min([heights[:-1], heights[1:]], axis=0)
data['ymax'] = np.max([heights[:-1], heights[1:]], axis=0)
# less intuitive than (ymin + vjust(ymax-ymin)), but
# this way avoids subtracting numbers of potentially
# similar precision
data['y'] = ((1-vjust)*data['ymin'] + vjust*data['ymax'])
return data
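# Hedged worked example (added): for one x position with y = [1, 2, 3] and
# vjust = 1, the cumulative heights are [0, 1, 3, 6], so the stacked
# intervals become (ymin, ymax) = (0, 1), (1, 3), (3, 6) and each bar's y
# sits at its own ymax; with fill=True the heights are divided by 6 so the
# bars become proportions of the column total.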
| gpl-2.0 |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/ensemble/tests/test_forest.py | 8 | 28031 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import product
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
"""Check consistency on dataset iris."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
"""Check consistency on dataset boston house prices."""
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
"""Regression models should not have a classes_ attribute."""
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
"""Predict probabilities."""
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importance(name, X, y):
"""Check variable importances."""
ForestClassifier = FOREST_CLASSIFIERS[name]
for n_jobs in [1, 2]:
clf = ForestClassifier(n_estimators=10, n_jobs=n_jobs)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = clf.transform(X, threshold="mean")
        assert_true(0 < X_new.shape[1] < X.shape[1])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
importances = clf.feature_importances_
assert_true(np.all(importances >= 0.0))
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = clf.feature_importances_
assert_almost_equal(importances, importances_bis)
def test_importances():
X, y = datasets.make_classification(n_samples=1000, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name in FOREST_CLASSIFIERS:
yield check_importance, name, X, y
def check_oob_score(name, X, y, n_estimators=20):
"""Check that oob prediction is a good estimation of the generalization
error."""
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
"""Check that base trees can be grid-searched."""
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
"""Check pickability."""
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
"""Check estimators on multi-output problems."""
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
"""Test that n_classes_ and classes_ have proper shape."""
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
'''
Test that the `sparse_output` parameter of RandomTreesEmbedding
works by returning a dense array.
'''
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
'''
Test that the `sparse_output` parameter of RandomTreesEmbedding
works by returning the same array for both argument
values.
'''
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
    X[:, 0] = rng.randint(0, 2, 1000)  # use the seeded rng for reproducibility
    X[:, 1] = rng.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
"""Test precedence of max_leaf_nodes over max_depth. """
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
"""Test if leaves contain more than leaf_count training examples"""
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_memory_layout(name, dtype):
"""Check that it works no matter the memory layout"""
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
def test_1d_input():
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_warm_start(name, random_state=42):
"""Test if fitting incrementally with warm start gives a forest of the
right size and the same results as a normal fit."""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
"""Test if fit clears state and grows a new forest when warm_start==False.
"""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
"""Test if warm start second fit with smaller n_estimators raises error."""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
"""Test if warm start with equal n_estimators does nothing and returns the
same forest and raises a warning."""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
"""Test that the warm start computes oob score when asked."""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause |
njchiang/task-fmri-utils | fmri_core/cross_searchlight.py | 1 | 13150 | """
Custom version of searchlight for cross-classification
The searchlight is a widely used approach for the study of the
fine-grained patterns of information in fMRI analysis, in which
multivariate statistical relationships are iteratively tested in the
neighborhood of each location of a domain.
"""
# Authors : Vincent Michel ([email protected])
# Alexandre Gramfort ([email protected])
# Philippe Gervais ([email protected])
#
# License: simplified BSD
import time
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
import sklearn
from sklearn.externals.joblib import Parallel, delayed, cpu_count
from sklearn import svm
from sklearn.base import BaseEstimator
from sklearn.model_selection import cross_val_score, permutation_test_score
from nilearn import masking
from nilearn.image.resampling import coord_transform
from nilearn.input_data.nifti_spheres_masker import _apply_mask_and_get_affinity
from nilearn._utils.compat import _basestring
# from nilearn._utils.fixes import cross_val_score
ESTIMATOR_CATALOG = dict(svc=svm.LinearSVC, svr=svm.SVR)
def search_light(X, y, estimator, A, groups=None, scoring=None,
cv=None, n_jobs=-1, verbose=0, permutations=0, random_state=42):
"""Function for computing a search_light
Parameters
----------
X : array-like of shape at least 2D
data to fit.
y : array-like
target variable to predict.
estimator : estimator object implementing 'fit'
object to use to fit the data
A : scipy sparse matrix.
        adjacency matrix. Defines for each feature the neighboring features
following a given structure of the data.
groups : array-like, optional
group label for each sample for cross validation. default None
NOTE: will have no effect for scikit learn < 0.18
scoring : string or callable, optional
The scoring strategy to use. See the scikit-learn documentation
for possible values.
        If callable, it takes as arguments the fitted estimator, the
test data (X_test) and the test target (y_test) if y is
not None.
cv : cross-validation generator, optional
A cross-validation generator. If None, a 3-fold cross
validation is used or 3-fold stratified cross-validation
when y is supplied.
n_jobs : int, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : int, optional
        The verbosity level. Default is 0
Returns
-------
scores : array-like of shape (number of rows in A)
search_light scores
"""
    # TODO : figure out what's going on here...
group_iter = GroupIterator(A.shape[0], n_jobs)
scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_group_iter_search_light)(
A.rows[list_i],
estimator, X, y, groups, scoring, cv,
thread_id + 1, A.shape[0], verbose, permutations, random_state)
for thread_id, list_i in enumerate(group_iter))
return np.concatenate(scores)
class GroupIterator(object):
"""Group iterator
    Provides groups of features for the search_light loop
    that may be used with Parallel.
Parameters
----------
n_features : int
Total number of features
n_jobs : int, optional
The number of CPUs to use to do the computation. -1 means
        'all CPUs'. Default is 1
"""
def __init__(self, n_features, n_jobs=1):
self.n_features = n_features
if n_jobs == -1:
n_jobs = cpu_count()
self.n_jobs = n_jobs
def __iter__(self):
split = np.array_split(np.arange(self.n_features), self.n_jobs)
for list_i in split:
yield list_i
def _group_iter_search_light(list_rows, estimator, X, y, groups,
scoring, cv, thread_id, total, verbose=0,
permutations=0, random_state=42):
"""Function for grouped iterations of search_light
Parameters
-----------
list_rows : array of arrays of int
adjacency rows. For a voxel with index i in X, list_rows[i] is the list
of neighboring voxels indices (in X).
estimator : estimator object implementing 'fit'
object to use to fit the data
X : array-like of shape at least 2D
data to fit.
y : array-like
target variable to predict.
groups : array-like, optional
group label for each sample for cross validation.
NOTE: will have no effect for scikit learn < 0.18
scoring : string or callable, optional
Scoring strategy to use. See the scikit-learn documentation.
If callable, takes as arguments the fitted estimator, the
test data (X_test) and the test target (y_test) if y is
not None.
cv : cross-validation generator, optional
A cross-validation generator. If None, a 3-fold cross validation is
used or 3-fold stratified cross-validation when y is supplied.
thread_id : int
process id, used for display.
total : int
Total number of voxels, used for display
verbose : int, optional
        The verbosity level. Default is 0
Returns
-------
par_scores : numpy.ndarray
score for each voxel. dtype: float64.
"""
if permutations > 0:
par_scores = np.zeros((len(list_rows), permutations + 1))
else:
if groups is not None:
par_scores = np.zeros((len(list_rows), len(set(groups))))
else:
par_scores = np.zeros(len(list_rows))
t0 = time.time()
for i, row in enumerate(list_rows):
kwargs = dict()
if not LooseVersion(sklearn.__version__) < LooseVersion('0.15'):
kwargs['scoring'] = scoring
if LooseVersion(sklearn.__version__) >= LooseVersion('0.18'):
kwargs['groups'] = groups
elif scoring is not None:
warnings.warn('Scikit-learn version is too old. '
'scoring argument ignored', stacklevel=2)
if permutations > 0:
actual, perms, _ = permutation_test_score(estimator, X[:, row],
y, cv=cv, n_jobs=1, n_permutations=permutations,
random_state=random_state,
**kwargs)
par_scores[i] = np.hstack([actual, perms])
else:
if groups is not None:
par_scores[i] = cross_val_score(estimator, X[:, row],
y, cv=cv, n_jobs=1,
**kwargs)
else:
par_scores[i] = np.mean(cross_val_score(estimator, X[:, row],
y, cv=cv, n_jobs=1,
**kwargs))
if verbose > 0:
# One can't print less than each 10 iterations
step = 11 - min(verbose, 10)
if (i % step == 0):
# If there is only one job, progress information is fixed
if total == len(list_rows):
crlf = "\r"
else:
crlf = "\n"
percent = float(i) / len(list_rows)
percent = round(percent * 100, 2)
dt = time.time() - t0
# We use a max to avoid a division by zero
remaining = (100. - percent) / max(0.01, percent) * dt
sys.stderr.write(
"Job #%d, processed %d/%d voxels "
"(%0.2f%%, %i seconds remaining)%s"
% (thread_id, i, len(list_rows), percent, remaining, crlf))
return par_scores
##############################################################################
# Class for search_light #####################################################
##############################################################################
class SearchLight(BaseEstimator):
"""Implement search_light analysis using an arbitrary type of classifier.
Parameters
-----------
mask_img : Niimg-like object
See http://nilearn.github.io/manipulating_images/input_output.html
boolean image giving location of voxels containing usable signals.
process_mask_img : Niimg-like object, optional
See http://nilearn.github.io/manipulating_images/input_output.html
boolean image giving voxels on which searchlight should be
computed.
radius : float, optional
radius of the searchlight ball, in millimeters. Defaults to 2.
estimator : 'svr', 'svc', or an estimator object implementing 'fit'
The object to use to fit the data
n_jobs : int, optional. Default is -1.
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
scoring : string or callable, optional
The scoring strategy to use. See the scikit-learn documentation
If callable, takes as arguments the fitted estimator, the
test data (X_test) and the test target (y_test) if y is
not None.
cv : cross-validation generator, optional
A cross-validation generator. If None, a 3-fold cross
validation is used or 3-fold stratified cross-validation
when y is supplied.
verbose : int, optional
        Verbosity level. Default is 0
Notes
------
The searchlight [Kriegeskorte 06] is a widely used approach for the
study of the fine-grained patterns of information in fMRI analysis.
Its principle is relatively simple: a small group of neighboring
features is extracted from the data, and the prediction function is
instantiated on these features only. The resulting prediction
accuracy is thus associated with all the features within the group,
or only with the feature on the center. This yields a map of local
    fine-grained information that can be used for assessing hypotheses
on the local spatial layout of the neural code under investigation.
Nikolaus Kriegeskorte, Rainer Goebel & Peter Bandettini.
Information-based functional brain mapping.
Proceedings of the National Academy of Sciences
of the United States of America,
vol. 103, no. 10, pages 3863-3868, March 2006
"""
def __init__(self, mask_img, process_mask_img=None, radius=2.,
estimator='svc',
n_jobs=1, scoring=None, cv=None,
verbose=0):
self.mask_img = mask_img
self.process_mask_img = process_mask_img
self.radius = radius
self.estimator = estimator
self.n_jobs = n_jobs
self.scoring = scoring
self.cv = cv
self.verbose = verbose
def fit(self, imgs, y, groups=None, permutations=0, random_state=42):
"""Fit the searchlight
Parameters
----------
imgs : Niimg-like object
See http://nilearn.github.io/manipulating_images/input_output.html
4D image.
y : 1D array-like
Target variable to predict. Must have exactly as many elements as
3D images in img.
groups : array-like, optional
group label for each sample for cross validation. Must have
exactly as many elements as 3D images in img. default None
NOTE: will have no effect for scikit learn < 0.18
"""
# Get the seeds
process_mask_img = self.process_mask_img
if self.process_mask_img is None:
process_mask_img = self.mask_img
# Compute world coordinates of the seeds
process_mask, process_mask_affine = masking._load_mask_img(
process_mask_img)
process_mask_coords = np.where(process_mask != 0)
process_mask_coords = coord_transform(
process_mask_coords[0], process_mask_coords[1],
process_mask_coords[2], process_mask_affine)
process_mask_coords = np.asarray(process_mask_coords).T
X, A = _apply_mask_and_get_affinity(
process_mask_coords, imgs, self.radius, True,
mask_img=self.mask_img)
estimator = self.estimator
if isinstance(estimator, _basestring):
estimator = ESTIMATOR_CATALOG[estimator]()
scores = search_light(X, y, estimator, A, groups,
self.scoring, self.cv, self.n_jobs,
self.verbose, permutations=permutations, random_state=random_state)
if groups is not None:
scores_3D = []
for g in range(scores.shape[1]):
these_scores = np.zeros(process_mask.shape)
these_scores[process_mask] = scores[:, g]
scores_3D.append(these_scores)
scores_3D = np.stack(scores_3D, -1)
else:
scores_3D = np.zeros(process_mask.shape)
scores_3D[process_mask] = scores
self.scores_ = scores_3D
return self
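if __name__ == "__main__":
    # Illustrative usage sketch only -- not part of the original module. The
    # synthetic volume, the 5.6 mm radius and the leave-one-group-out setup
    # below are assumptions made purely for demonstration.
    import nibabel as nib
    from sklearn.model_selection import LeaveOneGroupOut
    rng = np.random.RandomState(0)
    n_scans = 20
    affine = np.eye(4)
    # small synthetic 4D volume and a mask covering its interior
    imgs = nib.Nifti1Image(rng.randn(8, 8, 8, n_scans), affine)
    mask_data = np.zeros((8, 8, 8), dtype=np.int8)
    mask_data[1:7, 1:7, 1:7] = 1
    mask = nib.Nifti1Image(mask_data, affine)
    # two-class labels and one group label per (hypothetical) run
    y = np.array([0, 1] * (n_scans // 2))
    groups = np.repeat(np.arange(4), n_scans // 4)
    # with a group-aware cv, fit() stores one score map per left-out group
    sl = SearchLight(mask, process_mask_img=mask, radius=5.6,
                     estimator='svc', cv=LeaveOneGroupOut(), n_jobs=1)
    sl.fit(imgs, y, groups=groups)
    print(sl.scores_.shape)  # expected: (8, 8, 8, 4), one map per group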
| mit |
hrjn/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
sebchalmers/TrafficMHE | TrafficFlow.py | 1 | 25833 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 16, 2014
@author:
Sebastien Gros
Assistant Professor
Department of Signals and Systems
Chalmers University of Technology
SE-412 96 Göteborg, SWEDEN
[email protected]
Python/casADi Module:
An MHE module for Freeway Traffic Incident Detection
Requires the installation of the open-source Python module casADi together with the NLP solver ipopt
Required version of CasADi: v1.7.x
"""
from casadi import *
from casadi.tools import *
#import math as math
import numpy as np
import scipy.special as sc_spec
from scipy import linalg
import scipy.io
import random as rand
import matplotlib.pyplot as plt
from pylab import matshow
from matplotlib import interactive
def assertList(var):
if not(isinstance(var,list)):
var = [var]
return var
#Solver Constructor (used by the FreeWay class)
def _setSolver(self, V,Cost,g,P, Tol):
lbg = g()
ubg = g()
if ('IneqConst' in g.keys()):
lbg['IneqConst'] = -inf
ubg['IneqConst'] = 0
EqConst = list(veccat(g.i['EqConst' ]))
if 'IneqConst' in g.keys():
IneqConst = list(veccat(g.i['IneqConst']))
else:
IneqConst = []
nl = MXFunction(nlpIn(x=V,p=P),nlpOut(f=Cost,g=g))
nl.init()
# set-up solver
solver = IpoptSolver(nl)
solver.setOption("expand",True)
solver.setOption("print_level",0)
solver.setOption("hessian_approximation","exact")
solver.setOption("max_iter",1500)
solver.setOption("tol",Tol)
solver.setOption("linear_solver","ma27")
solver.init()
print "Solver constructed, Extract Hessian & gradient"
H = solver.hessLag()
H.init()
dg = solver.jacG()
dg.init()
gfunc = MXFunction([V,P],[g])
gfunc.init()
f = solver.gradF()
f.init()
Solver = {'solver': solver, 'H': H, 'f': f, 'dg': dg, 'g': gfunc, 'lbg': lbg, 'ubg': ubg, 'V': V, 'EqConst':EqConst, 'IneqConst':IneqConst}
return Solver
def _CreateQP(solver,V):
solver['H'].evaluate()
H = solver['H'].output()# + np.eye(solver['H'].output().shape[0])
solver['dg'].evaluate()
dg = solver['dg'].output()
QPsolver = NLPQPSolver(qpStruct(h=H.sparsity(),a=dg.sparsity()))
QPsolver.setOption({"nlp_solver":IpoptSolver, "nlp_solver_options": {"tol": 1e-8,"verbose": 0} })
QPsolver.init()
return QPsolver
def _CreateFunc(Input,Output):
Func = MXFunction(Input,Output)
Func.init()
return Func
class FreeWay:
def _StructBuilder(self, Vars):
EntryList = []
for key in Vars:
Struct = struct_ssym([key])
EntryList.append(entry(key, struct = Struct, repeat = self.NumSegment))
Struct = struct_msym(EntryList)
return Struct
def __init__(self, Path = [], Slacks = [], Meas = [], ExtParam = []):
print "------------------"
print "Initialize FreeWay"
print "------------------"
print "Load Data"
try:
self.Data = scipy.io.loadmat(Path)
except:
print "Unkown Data file, give path/name", Path
print "Build Symbolic variables"
self.NumSegment = self.Data['L'].shape[1]+1
self.DataTime = self.Data['vv_3lane'].shape[0]-1
VarList = {
'States': ['rho', 'v'],
'Inputs': ['r','f','alpha','beta', 'Ve']
}
GlobalParams = struct_ssym(['vfree', 'kappa', 'tau', 'a', 'rho_cr', 'eta', 'T', 'delta'])
Li = struct_ssym(['L'])
#Build structure to hold the data (will merge into EP)
if not(Meas == []):
self.Meas = self._StructBuilder(Meas)
else:
print "WARNING: NO DATA PROVIDED"
if not(Slacks == []):
# Slacks declared by the user (for constraints relaxation)
print "Slack variables detected:"
print Slacks
VarList['Slacks'] = Slacks
#Spatial Structure
self.VSpace = {}
self.VSpacePrev = {}
for var in VarList.keys():
self.VSpace[var] = self._StructBuilder(VarList[var])
self.VSpacePrev[var] = self._StructBuilder(VarList[var])
#Create Structure for External parameters
ParamList = [
entry('Global', struct = GlobalParams ),
entry('States0', struct = self.VSpace['States'] ),
entry('Inputs0', struct = self.VSpace['Inputs'] ),
entry('Dist', struct = Li, repeat = self.NumSegment )
]
self.Param = struct_msym(ParamList)
if not(ExtParam == []):
ExtParamList = []
for key in ExtParam:
Struct = struct_ssym([key])
ExtParamList.append(entry(key, struct = Struct, repeat = self.NumSegment ))
self.ExtParam = struct_msym(ExtParamList)
T = self.Param['Global', 'T']
a = self.Param['Global', 'a']
delta = self.Param['Global', 'delta']
kappa = self.Param['Global', 'kappa']
tau = self.Param['Global', 'tau']
vfree = self.Param['Global', 'vfree']
rho_cr = self.Param['Global','rho_cr']
eta = self.Param['Global', 'eta']
print "Build Stage-wise Dynamic Function"
self._Shoot = ['Boundary, dynamics n/a']
for i in range(1,self.NumSegment-1):
##Attribute spatial dynamic elements
rho_i = self.VSpace['States']['rho', i]
v_i = self.VSpace['States']['v', i]
rho_im = self.VSpace['States']['rho',i-1]
v_im = self.VSpace['States']['v', i-1]
rho_ip = self.VSpace['States']['rho',i+1]
v_ip = self.VSpace['States']['v', i+1]
q_im = rho_im*v_im
q_i = rho_i *v_i
r_i = self.VSpace['Inputs']['r', i]
f_i = self.VSpace['Inputs']['f', i]
beta_i = self.VSpace['Inputs']['beta', i]
alpha_i = self.VSpace['Inputs']['alpha',i]
Ve_i = self.VSpace['Inputs']['Ve', i]
Li = self.Param['Dist',i,'L']
#Construct Dynamics
Dyn = []
Dyn.append( rho_i + T*(q_im - q_i + r_i - f_i)/Li )
Dyn.append( v_i + T*(beta_i*Ve_i - v_i)/tau \
+ T*v_i*(v_im - v_i)/Li \
- beta_i*(1 - alpha_i)*(eta*T)*(rho_ip - rho_i)/(rho_i + kappa)/(tau*Li) )
Shoot = _CreateFunc([self.VSpace['Inputs'],self.VSpace['States'],self.Param],[veccat(Dyn)])
self._Shoot.append(Shoot)
self._Shoot.append('Boundary, dynamics n/a')
def _BuildFunc(self, Expr, Type):
X = self.VSpace['States']
U = self.VSpace['Inputs']
Xprev = self.VSpacePrev['States']
Uprev = self.VSpacePrev['Inputs']
#CAREFUL, THE INPUT SCHEME MUST MATCH THE FUNCTION CALLS !!
if Type == 'Terminal':
listFuncInput = [X, Xprev] #, Slacks; if applicable
else:
listFuncInput = [X, Xprev, U, Uprev] #, Slacks; if applicable
if ('Slacks' in self.VSpace.keys()):
listFuncInput.append(self.VSpace['Slacks'])#['Slacks'])
listFuncInput.append(self.Param)
listFuncInput.append(self.Meas)
if hasattr(self,'ExtParam'):
listFuncInput.append(self.ExtParam)
self.listFuncInput = listFuncInput
Func = MXFunction(listFuncInput,[Expr])
Func.init()
return Func
def setIneqConst(self, Const, Type = 'Stage'):
"""
        Sets an inequality constraint of your MHE problem. Send in a symbolic expression constructed from .VSpace symbols as argument.
        Specify Type = 'Terminal' for a terminal constraint or Type = 'Arrival' for an arrival constraint (default is 'Stage').
"""
if not(isinstance(Const,list)):
Const = [Const]
#ConstFunc = self._BuildFunc(veccat(Const), Terminal)
if (Type == 'Stage'):
self._StageConst = self._BuildFunc(veccat(Const), Type)
if (Type == 'Arrival'):
self._ArrivalConst = self._BuildFunc(veccat(Const), Type)
if (Type == 'Terminal'):
self._TerminalConst = self._BuildFunc(veccat(Const), Type)
def setCost(self, Expr, Type = 'Stage'):
"""
Sets the Cost of your MHE problem. Send in a symbolic expression constructed from .VSpace symbols as argument.
Specify Arrival = True if this is your arrival cost.
"""
if (Type == 'Stage'):
self._StageCost = self._BuildFunc(Expr, Type)
if (Type == 'Arrival'):
self._ArrivalCost = self._BuildFunc(Expr, Type)
if (Type == 'Terminal'):
self._TerminalCost = self._BuildFunc(Expr, Type)
def _AddCostAndConst(self, Cost, IneqConst, k):
StageInputList = []
AddCost = 0
if (k==0):
StageInputList.append(self.V['States', k ])
StageInputList.append(self.EP['Param','States0' ])
StageInputList.append(self.V[ 'Inputs', k ])
StageInputList.append(self.EP['Param','Inputs0' ])
if (k > 0) and (k < self.Horizon-1):
StageInputList.append(self.V['States', k ])
StageInputList.append(self.V['States', k-1 ])
StageInputList.append(self.V['Inputs', k ])
StageInputList.append(self.V['Inputs', k-1 ])
if (k == self.Horizon-1):
StageInputList.append(self.V['States', k ])
StageInputList.append(self.V['States', k-1 ])
if ('Slacks' in self.VSpace.keys()):
StageInputList.append(self.V['Slacks',k])
StageInputList.append(self.EP['Param'])
StageInputList.append(self.EP['Meas', k])
if ('ExtParam' in self.EP.keys()):
StageInputList.append(self.EP['ExtParam',k])
if k == 0:
if hasattr(self, '_ArrivalCost'):
print "Arrival Cost Detected, build."
[AddCost ] = self._ArrivalCost.call( StageInputList)
Cost += AddCost
if hasattr(self, '_ArrivalConst'):
print "Arrival Constraint Detected, build."
[AppendConst] = self._ArrivalConst.call(StageInputList)
IneqConst.append( AppendConst )
if k == self.Horizon-1:
if hasattr(self, '_TerminalCost'):
print "Terminal Cost Detected, build."
[AddCost ] = self._TerminalCost.call( StageInputList)
Cost += AddCost
if hasattr(self, '_TerminalConst'):
print "Terminal Constraint Detected, build."
[AppendConst] = self._TerminalConst.call(StageInputList)
IneqConst.append( AppendConst )
if (k < self.Horizon-1) and (k > 0):
if hasattr(self, '_StageCost'):
print "Stage Cost Detected, build."
[AddCost ] = self._StageCost.call( StageInputList)
Cost += AddCost
if hasattr(self, '_StageConst'):
print "Stage Constraint Detected, build."
[AppendConst] = self._StageConst.call( StageInputList)
IneqConst.append( AppendConst )
return Cost, IneqConst
def _CreateEPStruct(self, TimeHorizon):
EPList = [
entry('Param', struct = self.Param),
entry('Meas', struct = self.Meas, repeat = TimeHorizon)
]
if hasattr(self,'ExtParam'):
EPList.append(entry('ExtParam', struct = self.ExtParam, repeat = TimeHorizon))
EP = struct_msym(EPList)
return EP
def BuildMHE(self, Horizon = 1, SimTime = 1, Tol = 1e-3):
"""
        Construct the MHE problem. Specify your horizon length as Horizon = ...
"""
if (Horizon > 0):
self.Horizon = Horizon
else:
print "Warning, no horizon specified. Default value is 1 !!"
self.SimTime = SimTime + Horizon
NumElement = {'States': Horizon, 'Inputs' : Horizon-1}
if ('Slacks' in self.VSpace.keys()):
NumElement['Slacks'] = Horizon
print "Build Freeway Variable Structures"
VarStruct = []
for var in NumElement.keys():
VarStruct.append( entry(var, struct = self.VSpace[var], repeat = NumElement[var] ))
SimStruct = []
for var in NumElement.keys():
SimStruct.append( entry(var, struct = self.VSpace[var], repeat = self.SimTime ))
self.V = struct_msym(VarStruct)
self.VSim = struct_msym(SimStruct)
self.EP = self._CreateEPStruct(self.Horizon)
self.EPSim = self._CreateEPStruct(self.SimTime)
#Construct Dynamic Constraints
print "Building Dynamic Constraints"
EquConst = []
for k in range(0,Horizon-1):
#print "Time", k
for i in range(1,self.NumSegment-1):
#print "Segment", i
[Xplus] = self._Shoot[i].call([self.V['Inputs',k],self.V['States',k],self.EP['Param']])
self.Xplus = Xplus
EquConst.append( veccat(self.V['States',k+1,...,i]) - Xplus )
#Construct Stage Cost & Inequality Constraints
print "Building Cost & Inequality Constraints"
IneqConst = []
Cost = 0
for k in range(Horizon):
Cost, IneqConst = self._AddCostAndConst(Cost, IneqConst, k)
print "\n"
print "------------------"
print "Construct Solvers"
print "------------------"
print "\n"
print "NLP"
gList = [entry('EqConst', expr = EquConst)]
if not(IneqConst == []):
gList.append(entry('IneqConst', expr = IneqConst))
self.g = struct_MX(gList)
##Setup Central Solver
self.Solver = _setSolver(self, self.V, Cost, self.g, self.EP, Tol = Tol)
self.gfunc = MXFunction([self.V, self.EP],[self.g])
self.gfunc.init()
self.Costfunc = MXFunction([self.V, self.EP],[Cost])
self.Costfunc.init()
##Setup Central Solver
print "QP"
self.QPSolver = _CreateQP(self.Solver, self.V)
print "\n"
print "Construction Terminated."
def PrepareQP(self, solver = [], Primal = [], Adjoint = [], EP = [], lbV = [], ubV = []):
solver['H'].setInput(Primal, 0)
solver['H'].setInput(EP ,1)
solver['H'].setInput(1. ,2)
solver['H'].setInput(Adjoint,3)
solver['H'].evaluate()
solver['f'].setInput(Primal, 0)
solver['f'].setInput(EP, 1)
solver['f'].evaluate()
solver['dg'].setInput(Primal, 0)
solver['dg'].setInput(EP, 1)
solver['dg'].evaluate()
solver['g'].setInput(Primal, 0)
solver['g'].setInput(EP, 1)
solver['g'].evaluate()
QP = {
'H' : DMatrix(solver[ 'H'].output()),
'f' : DMatrix(solver[ 'f'].output()),
'dg' : DMatrix(solver['dg'].output()),
'g' : DMatrix(solver[ 'g'].output()),
'lbX': DMatrix( lbV.cat - Primal.cat),
'ubX': DMatrix( ubV.cat - Primal.cat),
'lbg': DMatrix(solver['lbg'].cat - solver['g'].output()),
'ubg': DMatrix(solver['ubg'].cat - solver['g'].output())
}
return QP
def PlotSparsity(self, solver):
"""
Call in with your T.Solver as argument.
"""
solver['H'].evaluate()
H = solver['H'].output()
solver['dg'].evaluate()
dg = solver['dg'].output()
plt.figure(1)
plt.spy(np.concatenate([H,dg.T],axis=1))
plt.show()
def _setParameters(self, EP = [], ExtParam = []):
"""
Sets the FreeWay parameters. Call in with a Dictionary of your external parameters.
Segment lengths are automatically assigned from your specified data file.
"""
for key in ExtParam.keys():
EP['Param','Global',key] = ExtParam[key]
for i in range(self.Data['L'].shape[1]):
EP['Param','Dist',i,'L'] = self.Data['L'][0,i]
return EP
def _AssignData(self, Simulated, Data = 0, VStruct = [], EPStruct = [], ExtParam = []):
TimeRange = len(VStruct['States',:,...,veccat])
SegmentList = range(self.NumSegment)
EPStruct = self._setParameters(EP = EPStruct, ExtParam = ExtParam)
#Assign Measurements
DataDic = {'v': ['vv_3lane', 1.], 'rho': ['rr_3lane', 3.]}
for k in range(TimeRange):
for i in SegmentList:
for key in DataDic.keys():
EPStruct['Meas', k,key,i] = Data[DataDic[key][0]][k, i]/DataDic[key][1]
VStruct['States',k,key,i] = Data[DataDic[key][0]][k, i]/DataDic[key][1]
#Assign accident-free values
VStruct['Inputs',:,'alpha',:] = 0.
VStruct['Inputs',:,'beta',:] = 1.
for key in ['rho','v']:
EPStruct['Meas',:,key,:] = VStruct['States',:,key,:]
return VStruct, EPStruct
def GenerateData(self, VStruct = [], EPStruct = [], ExtParam = [], TimeRange = 0, Simulated = False, AddNoise = False):
"""
Generate data for testing purposes
"""
print "Assign Data"
StateNoise = 0
Data, EP = self._AssignData(Simulated, Data = self.Data, VStruct = VStruct, EPStruct = EPStruct, ExtParam = ExtParam)
if AddNoise:
rhoMean = np.mean(Data['States',:,'rho',:])
vMean = np.mean(Data['States',:,'v',:])
if TimeRange == 0:
TimeRange = len(Data['States',:,...,veccat])
if Simulated:
print "Construct Simulated Data"
FaultySegment = 3
#Assign parameters
vfree = EP['Param','Global', 'vfree' ]
a = EP['Param','Global', 'a' ]
rho_cr = EP['Param','Global', 'rho_cr' ]
for k in range(TimeRange-1):
#Generate accident
if k in range(360,840):
Data['Inputs',k,'alpha',FaultySegment] = np.min([Data['Inputs',k-1,'alpha',FaultySegment] + 0.01, 0.3])
Data['Inputs',k, 'beta',FaultySegment] = np.max([Data['Inputs',k-1, 'beta',FaultySegment] - 0.01, 0.7])
#Forward Simulation
for i in range(1,self.NumSegment-1):
#Assign Ve
Ve_arg = (1 + Data['Inputs',k,'alpha',i])*Data['States',k,'rho',i]
Data['Inputs',k,'Ve',i] = vfree*exp(-((Ve_arg/rho_cr)**a)/a)
for index, setInput in enumerate([Data['Inputs',k],Data['States',k],EP['Param']]):
self._Shoot[i].setInput(setInput,index)
self._Shoot[i].evaluate()
if AddNoise:
StateNoise = np.array([
rand.normalvariate(0,1e-3*rhoMean),
rand.normalvariate(0,1e-3*vMean)
])
Data['States',k+1,...,i] = list(self._Shoot[i].output() + StateNoise)
for key in ['rho','v']:
EP['Meas',:,key,:] = Data['States',:,key,:]
#Assign Ve on the boundaries (for consistency)
for k in range(TimeRange-1):
for i in [0,self.NumSegment-1]:
Ve_arg = (1 + Data['Inputs',k,'alpha',i])*Data['States',k,'rho',i]
Data['Inputs',k,'Ve',i] = vfree*exp(-((Ve_arg/rho_cr)**a)/a)
#Assign initial conditions
for key in ['Inputs','States']:
EP['Param',key+'0'] = Data[key,0]
print "Done"
return Data, EP
def PassMHEData(self,Data = [], EP = [], time = 0, ExtParam = []):
"""
Assign data to the MHE solver
"""
initMHE = self.V()
EPMHE = self._setParameters(EP = self.EP(), ExtParam = ExtParam)
for key in ['States0','Inputs0']:
EPMHE['Param',key] = EP['Param',key]
for k in range(self.Horizon):
initMHE['States',k,...] = Data['States',time+k,...]
EPMHE['Meas',k,...,:] = EP['Meas',time+k,...,:]
for k in range(self.Horizon-1):
initMHE['Inputs',k,...] = Data['Inputs',time+k,...]
#Set variable bounds & initial guess
lbV = self.V(-inf)
ubV = self.V( inf)
#Collapsed variables
for key in ['f','r']:
lbV['Inputs',:,key,:] = 0.
ubV['Inputs',:,key,:] = 0.
#Bounds on alpha, beta
for key in ['alpha', 'beta']:
lbV['Inputs',:,key,:] = 0
ubV['Inputs',:,key,:] = 1
if ('Slacks' in lbV.keys()):
lbV['Slacks'] = 0.
lbV['States'] = 0
lbV['Inputs',:,'Ve',:] = 0
ubV['Inputs',:,'Ve',:] = 130.
ubV['States',:,'v',:] = 130.
ubV['States',:,'rho',:] = 90.
ubV['Slacks'] = 1.
#for i in range(self.NumSegment):
# for k in range(self.Horizon-1):
# VeBound = np.mean(self.Data['VMS_d'][time+k,3*i:3*i+3])
# if np.isnan(VeBound):
# VeBound = 200.
# ubV['Inputs',k,'Ve',i] = VeBound
return initMHE, EPMHE, lbV, ubV
def SolveMHE(self, EP = [], lbV = [], ubV = [], init = []):
print "Solve Instance of MHE"
self.Solver['solver'].setInput(init, "x0")
self.Solver['solver'].setInput(self.Solver['lbg'], "lbg")
self.Solver['solver'].setInput(self.Solver['ubg'], "ubg")
self.Solver['solver'].setInput(lbV, "lbx")
self.Solver['solver'].setInput(ubV, "ubx")
self.Solver['solver'].setInput(EP, "p")
self.Solver['solver'].solve()
self._lbg = self.Solver['lbg']
self._ubg = self.Solver['ubg']
Adjoints = self.g(np.array(self.Solver['solver'].output('lam_g')))
Primal = self.V(np.array(self.Solver['solver'].output('x')))
Cost = self.Solver['solver'].output('f')
Status = int(self.Solver['solver'].getStats()['return_status'] == 'Solve_Succeeded')
return Primal, Adjoints, Cost, Status
def Shift(self, Primal, EP):
#Assign the previous 1st state and input to EP
EPShifted = EP
EPShifted['Param','States0'] = Primal['States',1]
EPShifted['Param','Inputs0'] = Primal['Inputs',1]
#Primal Shift
PrimalShifted = self.V(Primal)
PrimalShifted[...,:-1] = Primal[...,1:]
PrimalShifted[...,-1] = Primal[...,-1]
#Last step completion
for i in range(1,self.NumSegment-1):
for index, setInput in enumerate([Primal['Inputs',-1],Primal['States',-1],EP['Param']]):
self._Shoot[i].setInput(setInput,index)
self._Shoot[i].evaluate()
PrimalShifted['States',-1,...,i] = list(self._Shoot[i].output())
#Assign measurement to the boundary conditions
for i in [0,self.NumSegment-1]:
PrimalShifted['States',-1,...,i] = EP['Meas',-1,...,i]
return PrimalShifted, EPShifted
def Check(self, init = 0, EP = 0):
#Check initial guess
self.Costfunc.setInput(init, 0)
self.Costfunc.setInput(EP, 1)
self.Costfunc.evaluate()
Cost0 = self.Costfunc.output()
self.gfunc.setInput(init, 0)
self.gfunc.setInput(EP, 1)
self.gfunc.evaluate()
g0 = self.gfunc.output()
return Cost0, self.g(g0)
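if __name__ == '__main__':
    # Illustrative usage sketch only -- not part of the original module. The
    # data file name, horizon and tolerance are assumptions; the .mat file is
    # expected to carry the fields used by the constructor
    # ('L', 'vv_3lane', 'rr_3lane', ...).
    T = FreeWay(Path='TrafficData.mat', Meas=['rho', 'v'])
    # A minimal stage cost: penalise the deviation of the density and speed
    # states from their measurements on every segment.
    Cost = 0
    for i in range(T.NumSegment):
        Cost += (T.VSpace['States']['rho', i] - T.Meas['rho', i])**2
        Cost += (T.VSpace['States']['v', i] - T.Meas['v', i])**2
    T.setCost(Cost, Type='Stage')
    T.BuildMHE(Horizon=10, SimTime=100, Tol=1e-6)
    # The receding-horizon workflow then follows the methods above:
    # GenerateData -> PassMHEData -> SolveMHE -> Shift, looped over SimTime.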
| gpl-2.0 |
DonBeo/statsmodels | docs/sphinxext/ipython_directive.py | 30 | 27623 | # -*- coding: utf-8 -*-
"""Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives).
By default this directive assumes that your prompts are unchanged IPython ones,
but this can be customized. The configurable options that can be placed in
conf.py are
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
    The string to represent the IPython output prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import cStringIO
import os
import re
import sys
import tempfile
import ast
import time
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import matplotlib
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
matplotlib.use('Agg')
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
#handle try/except blocks. only catch outer except
if re.match(continuation + '\sexcept:', nextline):
inputline += '\n' + nextline[Nc+1:]
else:
inputline += '\n' + nextline[Nc:]
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self):
self.cout = cStringIO.StringIO()
# Create config object for IPython
config = Config()
config.Global.display_banner = False
config.Global.exec_lines = ['import numpy as np',
'from pylab import *'
]
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize ipython, but don't start its mainloop
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done *after* instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
#print "input='%s'"%self.input
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
source_raw = splitter.source_raw_reset()[1]
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""Process data block for INPUT token."""
decorator, input, rest = data
image_file = None
image_directive = None
#print 'INPUT:', data # dbg
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = decorator=='@doctest' or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i==0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, image_file,
image_directive)
#print 'OUTPUT', output # dbg
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, image_file):
"""Process data block for OUTPUT token."""
if is_doctest:
submitted = data.strip()
found = output
if found is not None:
found = found.strip()
# XXX - fperez: in 0.11, 'output' never comes with the prompt
# in it, just the actual output text. So I think all this code
# can be nuked...
# the above comment does not appear to be accurate... (minrk)
ind = found.find(output_prompt)
if ind<0:
e='output prompt="%s" does not match out line=%s' % \
(output_prompt, found)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
if found!=submitted:
e = ('doctest failure for input_lines="%s" with '
'found_output="%s" and submitted output="%s"' %
(input_lines, found, submitted) )
raise RuntimeError(e)
#print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin%lineno
output_prompt = self.promptout%lineno
image_file = None
image_directive = None
for token, data in block:
if token==COMMENT:
out_data = self.process_comment(data)
elif token==INPUT:
(out_data, input_lines, output, is_doctest, image_file,
image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token==OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
if self._pyplot_imported:
return
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
def process_pure_python(self, content):
"""
        content is a list of strings. It is the unedited directive content.
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
block = '\n'.join(content)
# remove blank lines
block = re.sub('\n+', '\n', block)
content = block.split('\n')
# if any figures, make sure you can handle them and no other figures exist
if re.search('^\s*@savefig', block, flags=re.MULTILINE):
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
# sub out the pseudo-decorators so we can parse
block = re.sub('@(?=[savefig|suppress|verbatim|doctest])', '#@', block)
        # this is going to raise an error if there are problems
        # in the python. if you want errors, make an ipython block
parsed_block = ast.parse(block)
in_lines = [i.lineno for i in parsed_block.body]
output = []
ct = 1
for lineno, line in enumerate(content):
line_stripped = line.strip('\n')
if lineno + 1 in in_lines: # this is an input line
modified = u"%s %s" % (fmtin % ct, line_stripped)
ct += 1
elif line.startswith('@'): # is it a decorator?
modified = line
else: # this is something else
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
modified = u'%s %s' % (continuation, line)
output.append(modified)
output = re.sub('#@(?=[savefig|suppress|verbatim|doctest])', '@',
'\n'.join(output)).split('\n')
# put blank lines after input lines
for i in in_lines[1:][::-1]:
output.insert(i-1, u'')
# fix the spacing for decorators
        # there might be a cleaner regex for
# \n@savefig name.png\n\n -> \n\n@savefig name.png\n
decpat1 = '(?<=@[savefig|suppress|verbatim|doctest])(?P<options>.+)\n\n'
output = re.sub(decpat1, '\g<options>\n','\n'.join(output))
decpat2 = '\n(?=@[savefig|suppress|verbatim|doctest])'
output = re.sub(decpat2, '\n\n', output).split('\n')
return output
class IpythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
}
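    # Hedged usage sketch (not part of the original file): in a reST source
    # this directive is typically written along the lines of
    #
    #   .. ipython::
    #
    #      In [1]: x = 2
    #
    #      In [2]: x ** 3
    #      Out[2]: 8
    #
    # with the options above (:python:, :suppress:, :verbatim:, :doctest:)
    # available as directive options.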
shell = EmbeddedSphinxShell()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
return savefig_dir, source_dir, rgxin, rgxout, promptin, promptout
def setup(self):
        # make a file in this directory; if there's already one and it's
        # older than 5 minutes, delete it (this needs a more robust solution)
cur_dir = os.path.normpath(
os.path.join(self.state.document.settings.env.srcdir,
'..'))
tmp_file = os.path.join(cur_dir, 'seen_docs.temp')
if os.path.exists(tmp_file):
file_t = os.path.getmtime(tmp_file)
now_t = time.time()
if (now_t - file_t)/60. >= 5:
docs = []
os.remove(tmp_file)
else:
docs = open(tmp_file, 'r').read().split('\n')
if not self.state.document.current_source in docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
else: # haven't processed any docs yet
docs = []
# get config values
(savefig_dir, source_dir, rgxin,
rgxout, promptin, promptout) = self.get_config_options()
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
# write the filename to a tempfile because it's been "seen" now
if not self.state.document.current_source in docs:
fout = open(tmp_file, 'a')
fout.write(self.state.document.current_source+'\n')
fout.close()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython','']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
#text = '\n'.join(lines)
#figs = '\n'.join(figures)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
#print lines
if len(lines)>2:
if debug:
print '\n'.join(lines)
else: #NOTE: this raises some errors, what's it for?
#print 'INSERTING %d lines'%len(lines)
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
text = '\n'.join(lines)
txtnode = nodes.literal_block(text, text)
txtnode['language'] = 'ipython'
#imgnode = nodes.image(figs)
# cleanup
#self.teardown() # this gets called on _every_ exit from a block
return []#, imgnode]
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IpythonDirective)
app.add_config_value('ipython_savefig_dir', None, True)
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_promptin', 'In [%d]:', True)
app.add_config_value('ipython_promptout', 'Out[%d]:', True)
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
ipython_directive('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print 'All OK? Check figures in _static/'
| bsd-3-clause |
Iceman121/radial_bar_chart | src/package/data/user_input.py | 1 | 1320 | # ==============================================================================
# Chapter 1: Importing Data Related Modules
# ==============================================================================
# For storing bar chart data
import pandas as po
# For path details
import os
# ==============================================================================
# Defining Attributes and Methods for Use
# ==============================================================================
# Get Path
path = os.getcwd()
# Get Files List
def list_files():
files_list = po.DataFrame(os.listdir(path+'/data/raw/'),
columns=['Files'])
files_list = files_list.reset_index()
files_list.columns = ['ID', 'Files']
return files_list
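# Example of the frame returned by list_files() (hypothetical file names,
# shown only for illustration):
#
#    ID        Files
# 0   0    sales.csv
# 1   1  weather.csv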
# Import Data
def import_data():
print('Available Files for import: ')
files_list = list_files()
print(files_list)
print()
# User Input for identifying which file to convert
choice = int(input('Please enter the file ID you want to convert: '))
file_name = str(files_list.ix[files_list['ID'] == choice,
'Files'].values[0])
print()
print("Importing Raw Data...")
data = po.read_csv(path + "/data/raw/"+file_name, header=0)
print(data.head())
print()
return data
| mit |
probml/pyprobml | scripts/logreg_poly_demo.py | 1 | 5022 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 27 10:08:08 2020
@author: kpmurphy
"""
# Fit logistic regression models to 2d data using polynomial features
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_classification, make_blobs
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LogisticRegression
import matplotlib.colors as mcol
import os
figdir = "../figures"
def save_fig(fname): plt.savefig(os.path.join(figdir, fname))
def plot_data(ax, X, y, is_train=True):
X0 = X[:,0]; X1 = X[:,1]
colors = [ 'red', 'blue']
if is_train:
markers = ['x', '*']
else:
markers = ['o', 's']
for x0, x1, cls in zip(X0, X1, y):
color = colors[int(cls)-1]
marker = markers[int(cls)-1]
ax.scatter(x0, x1, marker=marker, color=color)
ax.set_ylim(-2.75,2.75)
def plot_predictions(ax, xx, yy, transformer, model):
grid = np.c_[xx.ravel(), yy.ravel()]
grid2 = transformer.transform(grid)[:, 1:]
Z = model.predict(grid2).reshape(xx.shape)
ax.pcolormesh(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.1)
#plt.axis('off')
def make_data(ntrain, ntest):
n = ntrain + ntest
X, y = make_classification(
n_samples=n, n_features=2, n_redundant=0,
n_classes=2, n_clusters_per_class=2,
class_sep=0.1, random_state=1)
X0, y0 = make_blobs(n_samples=[n, n], n_features=2,
cluster_std=2, random_state=1)
Xtrain = X[:ntrain, :]; ytrain = y[:ntrain]
Xtest = X[ntrain:, :]; ytest = y[ntrain:]
xmin = np.min(X[:,0]); xmax = np.max(X[:,0]);
ymin = np.min(X[:,1]); ymax = np.max(X[:,1]);
xx, yy = np.meshgrid(np.linspace(xmin, xmax, n), np.linspace(ymin, ymax, 200))
return Xtrain, ytrain, Xtest, ytest, xx, yy
ntrain = 50; ntest = 1000;
Xtrain, ytrain, Xtest, ytest, xx, yy = make_data(ntrain, ntest)
'''
degree_list = [1,2,4,6,8,10]
plot_list = degree_list
err_train_list = []
err_test_list = []
w_list = []
for i, degree in enumerate(degree_list):
transformer = PolynomialFeatures(degree)
name = 'Degree{}'.format(degree)
XXtrain = transformer.fit_transform(Xtrain)[:, 1:] # skip the first column of 1s
model = LogisticRegression(C=1e4)
model = model.fit(XXtrain, ytrain)
w = model.coef_[0]
w_list.append(w)
ytrain_pred = model.predict(XXtrain)
nerrors_train = np.sum(ytrain_pred != ytrain)
err_train_list.append(nerrors_train / ntrain)
XXtest = transformer.fit_transform(Xtest)[:, 1:] # skip the first column of 1s
ytest_pred = model.predict(XXtest)
nerrors_test = np.sum(ytest_pred != ytest)
err_test_list.append(nerrors_test / ntest)
if degree in plot_list:
fig, ax = plt.subplots()
plot_predictions(ax, xx, yy, transformer, model)
plot_data(ax, Xtrain, ytrain, is_train=True)
#plot_data(ax, Xtest, ytest, is_train=False)
ax.set_title(name)
fname = 'logreg_poly_surface-{}.png'.format(name)
save_fig(fname)
plt.draw()
plt.figure()
plt.plot(degree_list, err_train_list, 'x-', label='train')
plt.plot(degree_list, err_test_list, 'o-', label='test')
plt.legend()
plt.xlabel('polynomial degree')
plt.ylabel('error rate')
save_fig('logreg_poly_vs_degree.png')
for i in range(2):
print(w_list[i])
'''
### Try regularizers of different strengths
degree = 4
# C = 1/lambda, so large C means weak regularization (and hence higher variance)
C_list = np.logspace(0, 5, 7)
#C_list = [1, 10, 100, 200, 500, 1000]
plot_list = C_list
err_train_list = []
err_test_list = []
w_list = []
for i, C in enumerate(C_list):
transformer = PolynomialFeatures(degree)
name = 'Reg{:d}-Degree{}'.format(int(C), degree)
XXtrain = transformer.fit_transform(Xtrain)[:, 1:] # skip the first column of 1s
model = LogisticRegression(C=C)
model = model.fit(XXtrain, ytrain)
w = model.coef_[0]
w_list.append(w)
ytrain_pred = model.predict(XXtrain)
nerrors_train = np.sum(ytrain_pred != ytrain)
err_train_list.append(nerrors_train / ntrain)
XXtest = transformer.fit_transform(Xtest)[:, 1:] # skip the first column of 1s
ytest_pred = model.predict(XXtest)
nerrors_test = np.sum(ytest_pred != ytest)
err_test_list.append(nerrors_test / ntest)
if C in plot_list:
fig, ax = plt.subplots()
plot_predictions(ax, xx, yy, transformer, model)
plot_data(ax, Xtrain, ytrain, is_train=True)
#plot_data(ax, Xtest, ytest, is_train=False)
ax.set_title(name)
fname = 'logreg_poly_surface-{}.png'.format(name)
save_fig(fname)
plt.draw()
plt.figure()
plt.plot(C_list, err_train_list, 'x-', label='train')
plt.plot(C_list, err_test_list, 'o-', label='test')
plt.legend()
plt.xscale('log')
plt.xlabel('Inverse regularization')
plt.ylabel('error rate')
save_fig('logreg_poly_vs_reg-Degree{}.pdf'.format(degree))
| mit |
zuku1985/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 38 | 11165 | # Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
from sklearn.utils.testing import SkipTest, assert_equal, assert_true
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
# Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask,
dtype=np.float64)
assert_true(A.dtype == np.float64)
def test_connect_regions():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
for thr in (50, 150):
mask = face > thr
graph = img_to_graph(face, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
mask = face > 50
graph = grid_to_graph(*face.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = face > 150
graph = grid_to_graph(*face.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_face():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
face = face.astype(np.float32)
face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
+ face[1::2, 1::2])
face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
+ face[1::2, 1::2])
face = face.astype(np.float32)
face /= 16.0
return face
def _orange_face(face=None):
face = _downsampled_face() if face is None else face
face_color = np.zeros(face.shape + (3,))
face_color[:, :, 0] = 256 - face
face_color[:, :, 1] = 256 - face / 2
face_color[:, :, 2] = 256 - face / 4
return face_color
def _make_images(face=None):
face = _downsampled_face() if face is None else face
# make a collection of faces
images = np.zeros((3,) + face.shape)
images[0] = face
images[1] = face + 1
images[2] = face + 2
return images
downsampled_face = _downsampled_face()
orange_face = _orange_face(downsampled_face)
face_collection = _make_images(downsampled_face)
def test_extract_patches_all():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
face = orange_face
i_h, i_w = face.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
face = downsampled_face
face = face[:, 32:97]
i_h, i_w = face.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
face = downsampled_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_reconstruct_patches_perfect_color():
face = orange_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_patch_extractor_fit():
faces = face_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(faces))
def test_patch_extractor_max_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(faces) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(faces) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
faces = face_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(faces)
assert_equal(patches.shape, (len(faces) * 100, 19, 25))
def test_patch_extractor_all_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
faces = _make_images(orange_face)
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
face = downsampled_face
i_h, i_w = face.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(face, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
| bsd-3-clause |
fzalkow/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 129 | 10192 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
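    # d[i, j] holds the Euclidean distance between query point Y[i] and
    # training point X[j]; each kernel below is evaluated on d and summed
    # over the training points (axis -1).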
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
CrazyGuo/bokeh | sphinx/source/docs/tutorials/exercises/stocks.py | 23 | 2098 | ###
### NOTE: This exercise requires a network connection
###
import numpy as np
import pandas as pd
from bokeh.plotting import figure, output_file, show, VBox
# Here is some code to read in some stock data from the Yahoo Finance API
AAPL = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
MSFT = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
IBM = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
output_file("stocks.html", title="stocks.py example")
# create a figure
p1 = figure(title="Stocks",
x_axis_label="Date",
y_axis_label="Close price",
x_axis_type="datetime")
p1.below[0].formatter.formats = dict(years=['%Y'],
months=['%b %Y'],
days=['%d %b %Y'])
# EXERCISE: finish this line plot, and add more for the other stocks. Each one should
# have a legend, and its own color.
p1.line(
AAPL['Date'], # x coordinates
AAPL['Adj Close'], # y coordinates
color='#A6CEE3', # set a color for the line
legend='AAPL', # attach a legend label
)
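# One possible way to add the other stocks (a hedged sketch; the colors
# below are arbitrary choices, not prescribed by the exercise):
p1.line(
    MSFT['Date'],
    MSFT['Adj Close'],
    color='#B2DF8A',
    legend='MSFT',
)
p1.line(
    IBM['Date'],
    IBM['Adj Close'],
    color='#33A02C',
    legend='IBM',
)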
# EXERCISE: style the plot, set a title, lighten the gridlines, etc.
# EXERCISE: start a new figure
# Here is some code to compute the 30-day moving average for AAPL
aapl = AAPL['Adj Close']
aapl_dates = AAPL['Date']
window_size = 30
window = np.ones(window_size)/float(window_size)
aapl_avg = np.convolve(aapl, window, 'same')
# EXERCISE: plot a scatter of circles for the individual AAPL prices with legend
# 'close'. Remember to set the x axis type and tools on the first renderer
# EXERCISE: plot a line of the AAPL moving average data with the legend 'avg'
# EXERCISE: style the plot, set a title, lighten the gridlines, etc.
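# One possible solution sketch for the second figure (hedged; the figure
# title, colors and sizes are arbitrary choices):
p2 = figure(title="AAPL One-Month Average",
            x_axis_label="Date",
            y_axis_label="Price",
            x_axis_type="datetime")
p2.circle(aapl_dates, aapl, size=4, color='darkgrey', alpha=0.2, legend='close')
p2.line(aapl_dates, aapl_avg, color='navy', legend='avg')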
show(VBox(p1, p2)) # open a browser
| bsd-3-clause |
dsm054/pandas | pandas/tests/frame/test_repr_info.py | 1 | 17663 | # -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import re
import sys
import textwrap
import numpy as np
import pytest
from pandas import (DataFrame, Series, compat, option_context,
date_range, period_range, Categorical)
from pandas.compat import StringIO, lrange, u, PYPY
import pandas.io.formats.format as fmt
import pandas as pd
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
# Segregated collection of methods that require the BlockManager internal data
# structure
class TestDataFrameReprInfoEtc(TestData):
def test_repr_empty(self):
# empty
foo = repr(self.empty) # noqa
# empty with index
frame = DataFrame(index=np.arange(1000))
foo = repr(frame) # noqa
def test_repr_mixed(self):
buf = StringIO()
# mixed
foo = repr(self.mixed_frame) # noqa
self.mixed_frame.info(verbose=False, buf=buf)
@pytest.mark.slow
def test_repr_mixed_big(self):
# big mixed
biggie = DataFrame({'A': np.random.randn(200),
'B': tm.makeStringIndex(200)},
index=lrange(200))
biggie.loc[:20, 'A'] = np.nan
biggie.loc[:20, 'B'] = np.nan
foo = repr(biggie) # noqa
def test_repr(self):
buf = StringIO()
# small one
foo = repr(self.frame)
self.frame.info(verbose=False, buf=buf)
# even smaller
self.frame.reindex(columns=['A']).info(verbose=False, buf=buf)
self.frame.reindex(columns=['A', 'B']).info(verbose=False, buf=buf)
# exhausting cases in DataFrame.info
# columns but no index
no_index = DataFrame(columns=[0, 1, 3])
foo = repr(no_index) # noqa
# no columns or index
self.empty.info(buf=buf)
df = DataFrame(["a\n\r\tb"], columns=["a\n\r\td"], index=["a\n\r\tf"])
assert "\t" not in repr(df)
assert "\r" not in repr(df)
assert "a\n" not in repr(df)
def test_repr_dimensions(self):
df = DataFrame([[1, 2, ], [3, 4]])
with option_context('display.show_dimensions', True):
assert "2 rows x 2 columns" in repr(df)
with option_context('display.show_dimensions', False):
assert "2 rows x 2 columns" not in repr(df)
with option_context('display.show_dimensions', 'truncate'):
assert "2 rows x 2 columns" not in repr(df)
@pytest.mark.slow
def test_repr_big(self):
# big one
biggie = DataFrame(np.zeros((200, 4)), columns=lrange(4),
index=lrange(200))
repr(biggie)
def test_repr_unsortable(self):
# columns are not sortable
import warnings
warn_filters = warnings.filters
warnings.filterwarnings('ignore',
category=FutureWarning,
module=".*format")
unsortable = DataFrame({'foo': [1] * 50,
datetime.today(): [1] * 50,
'bar': ['bar'] * 50,
datetime.today() + timedelta(1): ['bar'] * 50},
index=np.arange(50))
repr(unsortable)
fmt.set_option('display.precision', 3, 'display.column_space', 10)
repr(self.frame)
fmt.set_option('display.max_rows', 10, 'display.max_columns', 2)
repr(self.frame)
fmt.set_option('display.max_rows', 1000, 'display.max_columns', 1000)
repr(self.frame)
tm.reset_display_options()
warnings.filters = warn_filters
def test_repr_unicode(self):
uval = u('\u03c3\u03c3\u03c3\u03c3')
# TODO(wesm): is this supposed to be used?
bval = uval.encode('utf-8') # noqa
df = DataFrame({'A': [uval, uval]})
result = repr(df)
ex_top = ' A'
assert result.split('\n')[0].rstrip() == ex_top
df = DataFrame({'A': [uval, uval]})
result = repr(df)
assert result.split('\n')[0].rstrip() == ex_top
def test_unicode_string_with_unicode(self):
df = DataFrame({'A': [u("\u05d0")]})
if compat.PY3:
str(df)
else:
compat.text_type(df)
def test_bytestring_with_unicode(self):
df = DataFrame({'A': [u("\u05d0")]})
if compat.PY3:
bytes(df)
else:
str(df)
def test_very_wide_info_repr(self):
df = DataFrame(np.random.randn(10, 20),
columns=tm.rands_array(10, 20))
repr(df)
def test_repr_column_name_unicode_truncation_bug(self):
# #1906
df = DataFrame({'Id': [7117434],
'StringCol': ('Is it possible to modify drop plot code'
' so that the output graph is displayed '
'in iphone simulator, Is it possible to '
'modify drop plot code so that the '
'output graph is \xe2\x80\xa8displayed '
'in iphone simulator.Now we are adding '
'the CSV file externally. I want to Call'
' the File through the code..')})
with option_context('display.max_columns', 20):
assert 'StringCol' in repr(df)
def test_latex_repr(self):
result = r"""\begin{tabular}{llll}
\toprule
{} & 0 & 1 & 2 \\
\midrule
0 & $\alpha$ & b & c \\
1 & 1 & 2 & 3 \\
\bottomrule
\end{tabular}
"""
with option_context("display.latex.escape", False,
'display.latex.repr', True):
df = DataFrame([[r'$\alpha$', 'b', 'c'], [1, 2, 3]])
assert result == df._repr_latex_()
# GH 12182
assert df._repr_latex_() is None
@tm.capture_stdout
def test_info(self):
io = StringIO()
self.frame.info(buf=io)
self.tsframe.info(buf=io)
frame = DataFrame(np.random.randn(5, 3))
frame.info()
frame.info(verbose=False)
def test_info_memory(self):
# https://github.com/pandas-dev/pandas/issues/21056
df = pd.DataFrame({'a': pd.Series([1, 2], dtype='i8')})
buf = StringIO()
df.info(buf=buf)
result = buf.getvalue()
bytes = float(df.memory_usage().sum())
expected = textwrap.dedent("""\
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 2 entries, 0 to 1
Data columns (total 1 columns):
a 2 non-null int64
dtypes: int64(1)
memory usage: {} bytes
""".format(bytes))
assert result == expected
def test_info_wide(self):
from pandas import set_option, reset_option
io = StringIO()
df = DataFrame(np.random.randn(5, 101))
df.info(buf=io)
io = StringIO()
df.info(buf=io, max_cols=101)
rs = io.getvalue()
assert len(rs.splitlines()) > 100
xp = rs
set_option('display.max_info_columns', 101)
io = StringIO()
df.info(buf=io)
assert rs == xp
reset_option('display.max_info_columns')
def test_info_duplicate_columns(self):
io = StringIO()
# it works!
frame = DataFrame(np.random.randn(1500, 4),
columns=['a', 'a', 'b', 'b'])
frame.info(buf=io)
def test_info_duplicate_columns_shows_correct_dtypes(self):
# GH11761
io = StringIO()
frame = DataFrame([[1, 2.0]],
columns=['a', 'a'])
frame.info(buf=io)
io.seek(0)
lines = io.readlines()
assert 'a 1 non-null int64\n' == lines[3]
assert 'a 1 non-null float64\n' == lines[4]
def test_info_shows_column_dtypes(self):
dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',
'complex128', 'object', 'bool']
data = {}
n = 10
for i, dtype in enumerate(dtypes):
data[i] = np.random.randint(2, size=n).astype(dtype)
df = DataFrame(data)
buf = StringIO()
df.info(buf=buf)
res = buf.getvalue()
for i, dtype in enumerate(dtypes):
name = '%d %d non-null %s' % (i, n, dtype)
assert name in res
def test_info_max_cols(self):
df = DataFrame(np.random.randn(10, 5))
for len_, verbose in [(5, None), (5, False), (10, True)]:
# For verbose always ^ setting ^ summarize ^ full output
with option_context('max_info_columns', 4):
buf = StringIO()
df.info(buf=buf, verbose=verbose)
res = buf.getvalue()
assert len(res.strip().split('\n')) == len_
for len_, verbose in [(10, None), (5, False), (10, True)]:
            # max_cols not exceeded
with option_context('max_info_columns', 5):
buf = StringIO()
df.info(buf=buf, verbose=verbose)
res = buf.getvalue()
assert len(res.strip().split('\n')) == len_
for len_, max_cols in [(10, 5), (5, 4)]:
# setting truncates
with option_context('max_info_columns', 4):
buf = StringIO()
df.info(buf=buf, max_cols=max_cols)
res = buf.getvalue()
assert len(res.strip().split('\n')) == len_
# setting wouldn't truncate
with option_context('max_info_columns', 5):
buf = StringIO()
df.info(buf=buf, max_cols=max_cols)
res = buf.getvalue()
assert len(res.strip().split('\n')) == len_
def test_info_memory_usage(self):
# Ensure memory usage is displayed, when asserted, on the last line
dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',
'complex128', 'object', 'bool']
data = {}
n = 10
for i, dtype in enumerate(dtypes):
data[i] = np.random.randint(2, size=n).astype(dtype)
df = DataFrame(data)
buf = StringIO()
# display memory usage case
df.info(buf=buf, memory_usage=True)
res = buf.getvalue().splitlines()
assert "memory usage: " in res[-1]
# do not display memory usage case
df.info(buf=buf, memory_usage=False)
res = buf.getvalue().splitlines()
assert "memory usage: " not in res[-1]
df.info(buf=buf, memory_usage=True)
res = buf.getvalue().splitlines()
# memory usage is a lower bound, so print it as XYZ+ MB
assert re.match(r"memory usage: [^+]+\+", res[-1])
df.iloc[:, :5].info(buf=buf, memory_usage=True)
res = buf.getvalue().splitlines()
# excluded column with object dtype, so estimate is accurate
assert not re.match(r"memory usage: [^+]+\+", res[-1])
# Test a DataFrame with duplicate columns
dtypes = ['int64', 'int64', 'int64', 'float64']
data = {}
n = 100
for i, dtype in enumerate(dtypes):
data[i] = np.random.randint(2, size=n).astype(dtype)
df = DataFrame(data)
df.columns = dtypes
df_with_object_index = pd.DataFrame({'a': [1]}, index=['foo'])
df_with_object_index.info(buf=buf, memory_usage=True)
res = buf.getvalue().splitlines()
assert re.match(r"memory usage: [^+]+\+", res[-1])
df_with_object_index.info(buf=buf, memory_usage='deep')
res = buf.getvalue().splitlines()
assert re.match(r"memory usage: [^+]+$", res[-1])
# Ensure df size is as expected
# (cols * rows * bytes) + index size
df_size = df.memory_usage().sum()
exp_size = len(dtypes) * n * 8 + df.index.nbytes
assert df_size == exp_size
# Ensure number of cols in memory_usage is the same as df
size_df = np.size(df.columns.values) + 1 # index=True; default
assert size_df == np.size(df.memory_usage())
# assert deep works only on object
assert df.memory_usage().sum() == df.memory_usage(deep=True).sum()
# test for validity
DataFrame(1, index=['a'], columns=['A']
).memory_usage(index=True)
DataFrame(1, index=['a'], columns=['A']
).index.nbytes
df = DataFrame(
data=1,
index=pd.MultiIndex.from_product(
[['a'], range(1000)]),
columns=['A']
)
df.index.nbytes
df.memory_usage(index=True)
df.index.values.nbytes
mem = df.memory_usage(deep=True).sum()
assert mem > 0
@pytest.mark.skipif(PYPY,
reason="on PyPy deep=True doesn't change result")
def test_info_memory_usage_deep_not_pypy(self):
df_with_object_index = pd.DataFrame({'a': [1]}, index=['foo'])
assert (df_with_object_index.memory_usage(
index=True, deep=True).sum() >
df_with_object_index.memory_usage(
index=True).sum())
df_object = pd.DataFrame({'a': ['a']})
assert (df_object.memory_usage(deep=True).sum() >
df_object.memory_usage().sum())
@pytest.mark.skipif(not PYPY,
reason="on PyPy deep=True does not change result")
def test_info_memory_usage_deep_pypy(self):
df_with_object_index = pd.DataFrame({'a': [1]}, index=['foo'])
assert (df_with_object_index.memory_usage(
index=True, deep=True).sum() ==
df_with_object_index.memory_usage(
index=True).sum())
df_object = pd.DataFrame({'a': ['a']})
assert (df_object.memory_usage(deep=True).sum() ==
df_object.memory_usage().sum())
@pytest.mark.skipif(PYPY, reason="PyPy getsizeof() fails by design")
def test_usage_via_getsizeof(self):
df = DataFrame(
data=1,
index=pd.MultiIndex.from_product(
[['a'], range(1000)]),
columns=['A']
)
mem = df.memory_usage(deep=True).sum()
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = mem - sys.getsizeof(df)
assert abs(diff) < 100
def test_info_memory_usage_qualified(self):
buf = StringIO()
df = DataFrame(1, columns=list('ab'),
index=[1, 2, 3])
df.info(buf=buf)
assert '+' not in buf.getvalue()
buf = StringIO()
df = DataFrame(1, columns=list('ab'),
index=list('ABC'))
df.info(buf=buf)
assert '+' in buf.getvalue()
buf = StringIO()
df = DataFrame(1, columns=list('ab'),
index=pd.MultiIndex.from_product(
[range(3), range(3)]))
df.info(buf=buf)
assert '+' not in buf.getvalue()
buf = StringIO()
df = DataFrame(1, columns=list('ab'),
index=pd.MultiIndex.from_product(
[range(3), ['foo', 'bar']]))
df.info(buf=buf)
assert '+' in buf.getvalue()
def test_info_memory_usage_bug_on_multiindex(self):
# GH 14308
# memory usage introspection should not materialize .values
from string import ascii_uppercase as uppercase
def memory_usage(f):
return f.memory_usage(deep=True).sum()
N = 100
M = len(uppercase)
index = pd.MultiIndex.from_product([list(uppercase),
pd.date_range('20160101',
periods=N)],
names=['id', 'date'])
df = DataFrame({'value': np.random.randn(N * M)}, index=index)
unstacked = df.unstack('id')
assert df.values.nbytes == unstacked.values.nbytes
assert memory_usage(df) > memory_usage(unstacked)
# high upper bound
assert memory_usage(unstacked) - memory_usage(df) < 2000
def test_info_categorical(self):
# GH14298
idx = pd.CategoricalIndex(['a', 'b'])
df = pd.DataFrame(np.zeros((2, 2)), index=idx, columns=idx)
buf = StringIO()
df.info(buf=buf)
def test_info_categorical_column(self):
# make sure it works
n = 2500
df = DataFrame({'int64': np.random.randint(100, size=n)})
df['category'] = Series(np.array(list('abcdefghij')).take(
np.random.randint(0, 10, size=n))).astype('category')
df.isna()
buf = StringIO()
df.info(buf=buf)
df2 = df[df['category'] == 'd']
buf = compat.StringIO()
df2.info(buf=buf)
def test_repr_categorical_dates_periods(self):
# normal DataFrame
dt = date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
p = period_range('2011-01', freq='M', periods=5)
df = DataFrame({'dt': dt, 'p': p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
df = DataFrame({'dt': Categorical(dt), 'p': Categorical(p)})
assert repr(df) == exp
| bsd-3-clause |
yannickmartin/wellFARE | wellfare/ILM/fast_estimators.py | 1 | 5629 | """
This module implements fast estimators for the time-profiles of
growth rate, promoter activity, and protein concentrations.
These estimators rely on a simple model in which gene expression
is modeled as a one-step process. This enables to compute the
observation matrix directly using an ad-hoc formula.
As a consequence these algorithms are faster and require less
parameters than their counterparts in module ``estimators``
Simple approximations are made to compute the observation matrix,
these are valid as long as the vector of estimation times (ttu) of
the different estimated input (growth rate, promoter actitivity,
protein concentration) has a fine time resolution.
See also:
----------
estimators : collection of functions for the inference
"""
import numpy as np
from scipy.integrate import odeint
from ..curves import Curve
from .methods import DEFAULT_ALPHAS, infer_control
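# Hypothetical usage sketch (the variable names below are illustrative and not
# part of this module): given volumes `vols` measured at times `tt_obs`,
#
#   volume_curve = Curve(tt_obs, vols)
#   ttu = np.linspace(tt_obs[0], tt_obs[-1], 100)
#   mu, v_smooth, ic, alpha, ascores = ilp_growth_rate(volume_curve, ttu)
#
# returns the growth rate estimated on the regular grid ``ttu`` together with
# the smoothed volume and the cross-validation details.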
def ilp_growth_rate(curve_volume, ttu, alphas=None, eps_L=.0001):
"""
Returns
--------
mu, v_smoothed, model
As described below.
mu
Vector of inferred mu.
v_smoothed
The predicted value of the observed volume at the same time
points as the data. v_smoothed will appear smoothed compared to
the measured volume.
mod
instance of sklearn.linear_model.RidgeCV, used for the Ridge
regularization / cross-validation. Useful to get the value of
the parameter alpha used etc.
"""
if isinstance(curve_volume, list):
results = [ilp_growth_rate(v, ttu,
alphas=alphas, eps_L=eps_L)
for v in curve_volume]
return zip(*results)
if alphas is None: alphas = DEFAULT_ALPHAS
ttv = curve_volume.x
dttu = 1.0*(ttu[1]-ttu[0])
H_ic = np.ones((len(ttv),1))
# dT is a Ny x Nu matrix with
# dT[i,j] = ttv[i] - ttu[j]
dT = np.array([ttv]).T - ttu
H_u = ( np.maximum(0, np.minimum(dttu, dT))
* curve_volume(ttu+ dttu/2))
H = np.hstack([H_ic, H_u])
growth_rate, v_smooth, ic, alpha, ascores = \
infer_control(H, y= curve_volume.y, Nic= 1,
alphas= alphas, eps_L = eps_L)
return ( Curve(ttu, growth_rate),
Curve(ttv, v_smooth),
ic, alpha, ascores )
def ilp_synthesis_rate(curve_fluo, curve_volume, ttu, degr,
alphas=None, eps_L=.0001):
"""
dF/dt = s(t)V(t) - degr*F
Parameters
-----------
curve_fluo
A curve instance representing the (noisy) measured
fluorescence
curve_volume
A curve instance representing the (noisy) measured
volume
ttu
      Times at which the control is estimated.
Returns
--------
synth_rate, fluo_smoothed, ic, alpha, ascores
As described below.
synth_rate
Vector. Inferred control.
fluo_smoothed
The predicted value of the observed data at the same time
points as the data. y_smoothed will appear smoothed compared
to y.
mod
instance of sklearn.linear_model.RidgeCV, used for the Ridge
regularization / cross-validation. Useful to get the value
of the parameter alpha used etc.
"""
if isinstance(curve_fluo, list):
results = [ilp_synthesis_rate(f, v, ttu, degr,
alphas=alphas, eps_L=eps_L)
for f, v in zip(curve_fluo, curve_volume)]
return zip(*results)
if alphas is None: alphas = DEFAULT_ALPHAS
tt_fluo= curve_fluo.x
H_ic = np.exp(-degr*tt_fluo).reshape((len(tt_fluo),1))
model = lambda Y,t: 1 - degr*Y
dtau = ttu[1]-ttu[0]
m = odeint(model,0,[0,dtau]).flatten()[1]
TT = (ttu-np.array([tt_fluo]).T)
H_u = (m*np.exp(degr*TT)*(TT<0)) * curve_volume(ttu + dtau/2)
H = np.hstack([H_ic, H_u])
activity, fluo_smooth, ic, alpha, ascores = \
infer_control(H, y= curve_fluo.y, Nic= 1, alphas= alphas,
eps_L = eps_L)
return ( Curve(ttu, activity),
Curve(tt_fluo, fluo_smooth),
ic, alpha, ascores )
def ilp_concentration(curve_fluo, curve_volume, ttu, dR, dP,
alphas=None, eps_L=0.0001):
""" Retrieves the concentration of a protein P, given
the fluorescence of reporter R.
Parameters
-----------
curve_fluo
A curve instance representing the measured fluorescence
(proportional to the quantities of reporter)
curve_volume
Volume of the population.
dR
Degradation rate of the reporter
dP
Degradation rate of the proteins.
alphas
Smoothing parameters to be tested.
eps_L
Negligible factor for the derivation matrix.
"""
if isinstance(curve_fluo, list):
results = [ilp_concentration(f, v, ttu, dR, dP,
alphas=alphas, eps_L=eps_L)
for f, v in zip(curve_fluo, curve_volume)]
return zip(*results)
tt = curve_fluo.x
deltatau = ttu[1]-ttu[0]
dT = np.array([tt]).T-ttu
    dTlz = dT >= 0 # ti - tj >= 0
    dTlzsdtau = dTlz*(dT < deltatau) # 0 <= ti - tj < delta_tau
A = np.exp(dR*np.minimum(deltatau, dT)) - 1
B = dTlz*np.exp(dT*(-dR))*(dP-dR)/dR
Hu = (dTlzsdtau + A*B)*curve_volume(ttu+deltatau/2)
Hic = np.array([np.exp(-dR*tt)]).reshape((len(tt),1))
H = np.hstack([Hic, Hu])
p_est, f_est, ic, a, ascores = infer_control(
H, curve_fluo.y, 1, alphas=alphas, eps_L=eps_L)
return (Curve(ttu, p_est),
Curve(tt, f_est),
ic, a, ascores ) | lgpl-3.0 |
RPGOne/Skynet | scikit-learn-0.18.1/examples/tree/plot_unveil_tree_structure.py | 67 | 4824 | """
=========================================
Understanding the decision tree structure
=========================================
The decision tree structure can be analysed to gain further insight on the
relation between the features and the target to predict. In this example, we
show how to retrieve:
- the binary tree structure;
- the depth of each node and whether or not it's a leaf;
- the nodes that were reached by a sample using the ``decision_path`` method;
- the leaf that was reached by a sample using the apply method;
- the rules that were used to predict a sample;
- the decision path shared by a group of samples.
"""
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
estimator = DecisionTreeClassifier(max_leaf_nodes=3, random_state=0)
estimator.fit(X_train, y_train)
# The decision estimator has an attribute called tree_ which stores the entire
# tree structure and allows access to low level attributes. The binary tree
# tree_ is represented as a number of parallel arrays. The i-th element of each
# array holds information about the node `i`. Node 0 is the tree's root. NOTE:
# Some of the arrays only apply to either leaves or split nodes, resp. In this
# case the values of nodes of the other type are arbitrary!
#
# Among those arrays, we have:
# - left_child, id of the left child of the node
# - right_child, id of the right child of the node
# - feature, feature used for splitting the node
# - threshold, threshold value at the node
#
# Using those arrays, we can parse the tree structure:
n_nodes = estimator.tree_.node_count
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
feature = estimator.tree_.feature
threshold = estimator.tree_.threshold
# The tree structure can be traversed to compute various properties such
# as the depth of each node and whether or not it is a leaf.
node_depth = np.zeros(shape=n_nodes)
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)] # seed is the root node id and its parent depth
while len(stack) > 0:
node_id, parent_depth = stack.pop()
node_depth[node_id] = parent_depth + 1
# If we have a test node
if (children_left[node_id] != children_right[node_id]):
stack.append((children_left[node_id], parent_depth + 1))
stack.append((children_right[node_id], parent_depth + 1))
else:
is_leaves[node_id] = True
print("The binary tree structure has %s nodes and has "
"the following tree structure:"
% n_nodes)
for i in range(n_nodes):
if is_leaves[i]:
print("%snode=%s leaf node." % (node_depth[i] * "\t", i))
else:
print("%snode=%s test node: go to node %s if X[:, %s] <= %ss else to "
"node %s."
% (node_depth[i] * "\t",
i,
children_left[i],
feature[i],
threshold[i],
children_right[i],
))
print()
# First let's retrieve the decision path of each sample. The decision_path
# method allows us to retrieve the node indicator functions. A non-zero element
# of the indicator matrix at position (i, j) indicates that sample i goes
# through node j.
node_indicator = estimator.decision_path(X_test)
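# node_indicator is a sparse CSR matrix of shape (n_test_samples, n_nodes);
# entry (i, j) is non-zero exactly when test sample i traverses node j.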
# Similarly, we can also have the leaves ids reached by each sample.
leave_id = estimator.apply(X_test)
# Now, it's possible to get the tests that were used to predict a sample or
# a group of samples. First, let's do it for a single sample.
sample_id = 0
node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
node_indicator.indptr[sample_id + 1]]
print('Rules used to predict sample %s: ' % sample_id)
for node_id in node_index:
    if leave_id[sample_id] == node_id:
        continue
if (X_test[sample_id, feature[node_id]] <= threshold[node_id]):
threshold_sign = "<="
else:
threshold_sign = ">"
print("decision id node %s : (X[%s, %s] (= %s) %s %s)"
% (node_id,
sample_id,
feature[node_id],
             X_test[sample_id, feature[node_id]],
threshold_sign,
threshold[node_id]))
# For a group of samples, we have the following common node.
sample_ids = [0, 1]
common_nodes = (node_indicator.toarray()[sample_ids].sum(axis=0) ==
len(sample_ids))
common_node_id = np.arange(n_nodes)[common_nodes]
print("\nThe following samples %s share the node %s in the tree"
% (sample_ids, common_node_id))
print("It is %s %% of all nodes." % (100 * len(common_node_id) / n_nodes,))
| bsd-3-clause |
nmartensen/pandas | pandas/tests/frame/test_reshape.py | 5 | 30682 | # -*- coding: utf-8 -*-
from __future__ import print_function
from warnings import catch_warnings
from datetime import datetime
import itertools
import pytest
from numpy.random import randn
from numpy import nan
import numpy as np
from pandas.compat import u
from pandas import (DataFrame, Index, Series, MultiIndex, date_range,
Timedelta, Period)
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameReshape(TestData):
def test_pivot(self):
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(
index='index', columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
tm.assert_frame_equal(pivoted, expected)
# name tracking
assert pivoted.index.name == 'index'
assert pivoted.columns.name == 'columns'
# don't specify values
pivoted = frame.pivot(index='index', columns='columns')
assert pivoted.index.name == 'index'
assert pivoted.columns.names == (None, 'columns')
with catch_warnings(record=True):
# pivot multiple columns
wp = tm.makePanel()
lp = wp.to_frame()
df = lp.reset_index()
tm.assert_frame_equal(df.pivot('major', 'minor'), lp.unstack())
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with tm.assert_raises_regex(ValueError, 'duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame({})
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
tm.assert_index_equal(result.columns, Index(['A', 'B'], name=0))
def test_pivot_index_none(self):
# gh-3962
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data).set_index('index')
result = frame.pivot(columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(result, expected)
# omit values
result = frame.pivot(columns='columns')
expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
('values', 'Two')],
names=[None, 'columns'])
expected.index.name = 'index'
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.name == 'index'
assert result.columns.names == (None, 'columns')
expected.columns = expected.columns.droplevel(0)
result = frame.pivot(columns='columns', values='values')
expected.columns.name = 'columns'
tm.assert_frame_equal(result, expected)
def test_stack_unstack(self):
f = self.frame.copy()
f[:] = np.arange(np.prod(f.shape)).reshape(f.shape)
stacked = f.stack()
stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
assert_frame_equal(unstacked, f)
assert_frame_equal(unstacked_df['bar'], f)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
assert_frame_equal(unstacked_cols.T, f)
assert_frame_equal(unstacked_cols_df['bar'].T, f)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack(fill_value=-1)
expected = DataFrame({'a': [1, -1, 5], 'b': [2, 4, -1]},
index=['x', 'y', 'z'], dtype=np.int16)
assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame({'a': [1, 0.5, 5], 'b': [2, 4, 0.5]},
index=['x', 'y', 'z'], dtype=np.float)
assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list('AB'), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
# From a mixed type dataframe
df['A'] = df['A'].astype(np.int16)
df['B'] = df['B'].astype(np.float64)
result = df.unstack(fill_value=-1)
expected['A'] = expected['A'].astype(np.int16)
expected['B'] = expected['B'].astype(np.float64)
assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.float)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = pd.date_range('2012-01-01', periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [dv[0], pd.NaT, dv[3]],
'b': [dv[1], dv[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame({'a': [dv[0], dv[0], dv[3]],
'b': [dv[1], dv[2], dv[0]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [td[0], pd.NaT, td[3]],
'b': [td[1], td[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame({'a': [td[0], td[1], td[3]],
'b': [td[1], td[2], td[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [Period('2012-01'), Period('2012-02'), Period('2012-03'),
Period('2012-04')]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [periods[0], None, periods[3]],
'b': [periods[1], periods[2], None]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame({'a': [periods[0], periods[1], periods[3]],
'b': [periods[1], periods[2], periods[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
data.index = pd.MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame({'a': pd.Categorical(list('axa'),
categories=list('abc')),
'b': pd.Categorical(list('bcx'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
# Fill with non-category results in NaN entries similar to above
result = data.unstack(fill_value='d')
assert_frame_equal(result, expected)
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value='c')
expected = DataFrame({'a': pd.Categorical(list('aca'),
categories=list('abc')),
'b': pd.Categorical(list('bcc'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = pd.DataFrame(dict(state=['IL', 'MI', 'NC'],
index=['a', 'b', 'c'],
some_categories=pd.Series(['a', 'b', 'c']
).astype('category'),
A=np.random.rand(3),
B=1,
C='foo',
D=pd.Timestamp('20010102'),
E=pd.Series([1.0, 50.0, 100.0]
).astype('float32'),
F=pd.Series([3.0, 4.0, 5.0]).astype('float64'),
G=False,
H=pd.Series([1, 200, 923442], dtype='int8')))
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(['state', 'index'])
unstack_and_compare(df1, 'index')
df1 = df.set_index(['state', 'some_categories'])
unstack_and_compare(df1, 'some_categories')
df1 = df.set_index(['F', 'C'])
unstack_and_compare(df1, 'F')
df1 = df.set_index(['G', 'B', 'state'])
unstack_and_compare(df1, 'B')
df1 = df.set_index(['E', 'A'])
unstack_and_compare(df1, 'E')
df1 = df.set_index(['state', 'index'])
s = df1['A']
unstack_and_compare(s, 'index')
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3),
repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
assert_frame_equal(df.stack(level=[1, 2]),
df.stack(level=1).stack(level=1))
assert_frame_equal(df.stack(level=[-2, -1]),
df.stack(level=1).stack(level=1))
df_named = df.copy()
df_named.columns.set_names(range(3), inplace=True)
assert_frame_equal(df_named.stack(level=[1, 2]),
df_named.stack(level=1).stack(level=1))
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ['exp', 'animal', 1]
assert_frame_equal(df2.stack(level=['animal', 1]),
animal_hair_stacked, check_names=False)
assert_frame_equal(df2.stack(level=['exp', 1]),
exp_hair_stacked, check_names=False)
# When mixed types are passed and the ints are not level
# names, raise
pytest.raises(ValueError, df2.stack, level=['animal', 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ['exp', 'animal', 0]
assert_frame_equal(df3.stack(level=['animal', 0]),
animal_hair_stacked, check_names=False)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=['exp', 'animal'])
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
assert_frame_equal(df2.stack(level=[1, 2]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 1]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 2]), exp_hair_stacked,
check_names=False)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
assert_frame_equal(df3.stack(level=[0, 1]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 0]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 1]), exp_hair_stacked,
check_names=False)
def test_unstack_bool(self):
df = DataFrame([False, False],
index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]),
columns=['col'])
rs = df.unstack()
xp = DataFrame(np.array([[False, np.nan], [np.nan, False]],
dtype=object),
index=['a', 'b'],
columns=MultiIndex.from_arrays([['col', 'col'],
['c', 'l']]))
assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = pd.MultiIndex(
levels=[[u('foo'), u('bar')], [u('one'), u('two')],
[u('a'), u('b')]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=[u('first'), u('second'), u('third')])
s = pd.Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = pd.MultiIndex(
levels=[['foo', 'bar'], ['one', 'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=['first', 'second'])
expected = pd.DataFrame(np.array([[np.nan, 0],
[0, np.nan],
[np.nan, 0],
[0, np.nan]],
dtype=np.float64),
index=expected_mi,
columns=pd.Index(['a', 'b'], name='third'))
assert_frame_equal(result, expected)
def test_unstack_to_series(self):
# check reversibility
data = self.frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
assert_frame_equal(undo, self.frame)
# check NA handling
data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})
data.index = Index(['a', 'b', 'c'])
result = data.unstack()
midx = MultiIndex(levels=[['x', 'y'], ['a', 'b', 'c']],
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4],
[1, 2, 3, 4],
[2, 1, 3, 4],
[2, 2, 3, 4]]
df = DataFrame(rows, columns=list('ABCD'))
result = df.get_dtype_counts()
expected = Series({'int64': 4})
assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(['A', 'B'])
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64': 4})
assert_series_equal(result, expected)
# mixed
df2 = df.set_index(['A', 'B'])
df2['C'] = 3.
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64': 2, 'float64': 2})
assert_series_equal(result, expected)
df2['D'] = 'foo'
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'float64': 2, 'object': 2})
assert_series_equal(result, expected)
# GH7405
for c, d in (np.zeros(5), np.zeros(5)), \
(np.arange(5, dtype='f8'), np.arange(5, 10, dtype='f8')):
df = DataFrame({'A': ['a'] * 5, 'C': c, 'D': d,
'B': pd.date_range('2012-01-01', periods=5)})
right = df.iloc[:3].copy(deep=True)
df = df.set_index(['A', 'B'])
df['D'] = df['D'].astype('int64')
left = df.iloc[:3].unstack(0)
right = right.set_index(['A', 'B']).unstack(0)
right[('D', 'a')] = right[('D', 'a')].astype('int64')
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_non_unique_index_names(self):
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')],
names=['c1', 'c1'])
df = DataFrame([1, 2], index=idx)
with pytest.raises(ValueError):
df.unstack('c1')
with pytest.raises(ValueError):
df.T.stack('c1')
def test_unstack_nan_index(self): # GH7466
cast = lambda val: '{0:1}'.format('' if val != val else val)
nan = np.nan
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split('.'))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(list(map(cast, right)))
assert left == right
df = DataFrame({'jim': ['a', 'b', nan, 'd'],
'joe': ['w', 'x', 'y', 'z'],
'jolie': ['a.w', 'b.x', ' .y', 'd.z']})
left = df.set_index(['jim', 'joe']).unstack()['jolie']
right = df.set_index(['joe', 'jim']).unstack()['jolie'].T
assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf['jolie'])
df = DataFrame({'1st': ['d'] * 3 + [nan] * 5 + ['a'] * 2 +
['c'] * 3 + ['e'] * 2 + ['b'] * 5,
'2nd': ['y'] * 2 + ['w'] * 3 + [nan] * 3 +
['z'] * 4 + [nan] * 3 + ['x'] * 3 + [nan] * 2,
'3rd': [67, 39, 53, 72, 57, 80, 31, 18, 11, 30, 59,
50, 62, 59, 76, 52, 14, 53, 60, 51]})
df['4th'], df['5th'] = \
df.apply(lambda r: '.'.join(map(cast, r)), axis=1), \
df.apply(lambda r: '.'.join(map(cast, r.iloc[::-1])), axis=1)
for idx in itertools.permutations(['1st', '2nd', '3rd']):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ['4th', '5th']:
verify(udf[col])
# GH7403
df = pd.DataFrame(
{'A': list('aaaabbbb'), 'B': range(8), 'C': range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[3, 0, 1, 2, nan, nan, nan, nan],
[nan, nan, nan, nan, 4, 5, 6, 7]]
vals = list(map(list, zip(*vals)))
idx = Index([nan, 0, 1, 2, 4, 5, 6, 7], name='B')
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
df = DataFrame({'A': list('aaaabbbb'), 'B': list(range(4)) * 2,
'C': range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[2, nan], [0, 4], [1, 5], [nan, 6], [3, 7]]
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
idx = Index([nan, 0, 1, 2, 3], name='B')
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
df = pd.DataFrame({'A': list('aaaabbbb'), 'B': list(range(4)) * 2,
'C': range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[3, nan], [0, 4], [1, 5], [2, 6], [nan, 7]]
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
idx = Index([nan, 0, 1, 2, 3], name='B')
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
# GH7401
df = pd.DataFrame({'A': list('aaaaabbbbb'), 'C': np.arange(10),
'B': (date_range('2012-01-01', periods=5)
.tolist() * 2)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack()
vals = np.array([[3, 0, 1, 2, nan, 4], [nan, 5, 6, 7, 8, 9]])
idx = Index(['a', 'b'], name='A')
cols = MultiIndex(levels=[['C'], date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
# GH4862
vals = [['Hg', nan, nan, 680585148],
['U', 0.0, nan, 680585148],
['Pb', 7.07e-06, nan, 680585148],
['Sn', 2.3614e-05, 0.0133, 680607017],
['Ag', 0.0, 0.0133, 680607017],
['Hg', -0.00015, 0.0133, 680607017]]
df = DataFrame(vals, columns=['agent', 'change', 'dosage', 's_id'],
index=[17263, 17264, 17265, 17266, 17267, 17268])
left = df.copy().set_index(['s_id', 'dosage', 'agent']).unstack()
vals = [[nan, nan, 7.07e-06, nan, 0.0],
[0.0, -0.00015, nan, 2.3614e-05, nan]]
idx = MultiIndex(levels=[[680585148, 680607017], [0.0133]],
labels=[[0, 1], [-1, 0]],
names=['s_id', 'dosage'])
cols = MultiIndex(levels=[['change'], ['Ag', 'Hg', 'Pb', 'Sn', 'U']],
labels=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
names=[None, 'agent'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
left = df.loc[17264:].copy().set_index(['s_id', 'dosage', 'agent'])
assert_frame_equal(left.unstack(), right)
# GH9497 - multiple unstack with nulls
df = DataFrame({'1st': [1, 2, 1, 2, 1, 2],
'2nd': pd.date_range('2014-02-01', periods=6,
freq='D'),
'jim': 100 + np.arange(6),
'joe': (np.random.randn(6) * 10).round(2)})
df['3rd'] = df['2nd'] - pd.Timestamp('2014-02-02')
df.loc[1, '2nd'] = df.loc[3, '2nd'] = nan
df.loc[1, '3rd'] = df.loc[4, '3rd'] = nan
left = df.set_index(['1st', '2nd', '3rd']).unstack(['2nd', '3rd'])
assert left.notna().values.sum() == 2 * len(df)
for col in ['jim', 'joe']:
for _, r in df.iterrows():
key = r['1st'], (col, r['2nd'], r['3rd'])
assert r[col] == left.loc[key]
def test_stack_datetime_column_multiIndex(self):
# GH 8039
t = datetime(2014, 1, 1)
df = DataFrame(
[1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, 'A', 'B')]))
result = df.stack()
eidx = MultiIndex.from_product([(0, 1, 2, 3), ('B',)])
ecols = MultiIndex.from_tuples([(t, 'A')])
expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
assert_frame_equal(result, expected)
def test_stack_partial_multiIndex(self):
# GH 8844
def _test_stack_with_multiindex(multiindex):
df = DataFrame(np.arange(3 * len(multiindex))
.reshape(3, len(multiindex)),
columns=multiindex)
for level in (-1, 0, 1, [0, 1], [1, 0]):
result = df.stack(level=level, dropna=False)
if isinstance(level, int):
# Stacking a single level should not make any all-NaN rows,
# so df.stack(level=level, dropna=False) should be the same
# as df.stack(level=level, dropna=True).
expected = df.stack(level=level, dropna=True)
if isinstance(expected, Series):
assert_series_equal(result, expected)
else:
assert_frame_equal(result, expected)
df.columns = MultiIndex.from_tuples(df.columns.get_values(),
names=df.columns.names)
expected = df.stack(level=level, dropna=False)
if isinstance(expected, Series):
assert_series_equal(result, expected)
else:
assert_frame_equal(result, expected)
full_multiindex = MultiIndex.from_tuples([('B', 'x'), ('B', 'z'),
('A', 'y'),
('C', 'x'), ('C', 'u')],
names=['Upper', 'Lower'])
for multiindex_columns in ([0, 1, 2, 3, 4],
[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2], [1, 2, 3], [2, 3, 4],
[0, 1], [0, 2], [0, 3],
[0], [2], [4]):
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
if len(multiindex_columns) > 1:
multiindex_columns.reverse()
_test_stack_with_multiindex(
full_multiindex[multiindex_columns])
df = DataFrame(np.arange(6).reshape(2, 3),
columns=full_multiindex[[0, 1, 3]])
result = df.stack(dropna=False)
expected = DataFrame([[0, 2], [1, nan], [3, 5], [4, nan]],
index=MultiIndex(
levels=[[0, 1], ['u', 'x', 'y', 'z']],
labels=[[0, 0, 1, 1],
[1, 3, 1, 3]],
names=[None, 'Lower']),
columns=Index(['B', 'C'], name='Upper'),
dtype=df.dtypes[0])
assert_frame_equal(result, expected)
def test_stack_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
for labels in [list("yxz"), list("yxy")]:
cidx = pd.CategoricalIndex(labels, categories=list("xyz"),
ordered=ordered)
df = DataFrame([[10, 11, 12]], columns=cidx)
result = df.stack()
# `MutliIndex.from_product` preserves categorical dtype -
# it's tested elsewhere.
midx = pd.MultiIndex.from_product([df.index, cidx])
expected = Series([10, 11, 12], index=midx)
tm.assert_series_equal(result, expected)
| bsd-3-clause |
leelasd/LigParGenTools | ljLRC.py | 1 | 1027 | import os
import sys
import numpy as np
import pandas as pd
def readLJfromPRM(filename):
infi=open(filename).readlines()
NBS=0
for n in range(len(infi)):
if 'NONBONDED' in infi[n]:
NBS=n+2
break
LJpar={}
atno = 0
for line in infi[NBS:]:
LJpar[atno]={'AT':line.split()[0],'EPS':-1.0*float(line.split()[2]),'SIG':float(line.split()[3])/0.561231}
atno+=1
return LJpar
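# CalcLRC evaluates the standard Lennard-Jones long-range (tail) correction,
# using geometric combining rules over all solvent/solute atom-type pairs:
#   E_LRC = (8*pi*N_solv*rho_solv/3) * sum_ij eps_ij*sig_ij^3
#           * [ (sig_ij/r_c)^9/3 - (sig_ij/r_c)^3 ]
# where r_c is the cutoff distance and the parameter names follow the
# function signature below.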
def CalcLRC(solvpar,solupar,Nsolv,cutoff,denSolv):
LRC = 0
for i in solvpar.keys():
for j in solupar.keys():
sigma = np.sqrt(solvpar[i]['SIG']*solupar[j]['SIG'])
epsil = np.sqrt(solvpar[i]['EPS']*solupar[j]['EPS'])
ratio = sigma/cutoff
            print(i, j, (pow(ratio,9)/3.0 - pow(ratio,3))*epsil*pow(sigma,3))
LRC = LRC + (pow(ratio,9)/3.0 - pow(ratio,3))*epsil*pow(sigma,3)
    print(LRC)
LRC = LRC*8*np.pi*Nsolv*denSolv/3.0
return LRC
solv=readLJfromPRM('CYH.prm')
solu=readLJfromPRM('MET.prm')
print(CalcLRC(solv, solu, Nsolv=140, cutoff=10.0, denSolv=0.774))
| mit |
CGATOxford/CGATPipelines | obsolete/pipeline_transcriptdiffexpression/trackers/Results.py | 1 | 4372 | import pandas as pd
import numpy as np
import sqlite3
from CGATReport.Tracker import SingleTableTrackerRows
from CGATReport.Tracker import SingleTableTrackerHistogram
from CGATReport.Tracker import *
from CGATReport.Utils import PARAMS as P
from IsoformReport import *
###############################################################################
# parse params
###############################################################################
DATABASE = P.get('', P.get('sql_backend', 'sqlite:///./csvdb'))
ANNOTATIONS_DATABASE = P.get('annotations_database')
###############################################################################
# trackers
###############################################################################
class SleuthResults(IsoformTracker):
pattern = "(.*)_DEresults$"
direction = ""
where = "WHERE p_value NOT NULL"
def __call__(self, track, slice=None):
quantifier = track.split("_")[-2]
statement = '''
SELECT A.gene_name, A.gene_id, A.transcript_id, B.reason AS flagged,
A.control_mean AS expression, A.fold, A.l2fold, A.p_value,
A.p_value_adj, A.significant, A.transcript_biotype
FROM %(track)s_DEresults AS A
LEFT JOIN %(quantifier)s_flagged_transcripts AS B
ON A.transcript_id = B.transcript_id
%(where)s
ORDER BY A.significant DESC, A.l2fold ASC
'''
return self.getAll(statement)
class SleuthResultsSig(SleuthResults):
pattern = "(.*)_DEresults$"
direction = ""
where = "WHERE p_value NOT NULL AND significant == 1 AND ABS(l2fold) > 1"
class SleuthAll(IsoformTracker):
pattern = ""
table = ""
def __call__(self, track, slice=None):
statement = '''
SELECT A.*, B.reason as flagged
FROM all_%(table)s_%(track)s AS A
LEFT JOIN %(track)s_flagged_transcripts AS B
ON A.transcript_id = B.transcript_id
'''
return self.getAll(statement)
class SleuthCountsAll(SleuthAll):
pattern = "all_counts_(.*)"
table = "counts"
class SleuthTpmAll(SleuthAll):
pattern = "all_tpm_(.*)"
table = "tpm"
class SummarisedResults(IsoformTracker):
pattern = "(.*)_DEresults$"
def __call__(self, track, slice=None):
quantifier = track.split("_")[-2]
design = "_".join(track.split("_")[:-2]) + "_design"
select_results = '''SELECT A.*,
B.p_value, B.p_value_adj, B.l2fold, B.transcript_biotype
FROM all_tpm_%(quantifier)s AS A
LEFT JOIN %(track)s_DEresults AS B
ON A.transcript_id = B.transcript_id
WHERE B.p_value<0.05'''
results_df = pd.DataFrame(self.getAll(select_results))
select_design = '''SELECT * FROM %(design)s'''
design_df = self.getDataFrame(select_design)
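        # For each design group, append mean/stdev columns computed across the
        # tracks (samples) belonging to that group; '-' in track names is
        # replaced by '_' to match the column names loaded from the database.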
for group in set(design_df['_group']):
group_tracks = design_df[design_df["_group"] == group]['track']
group_tracks = [x.replace("-", "_") for x in group_tracks]
results_df["group_%s_mean" % group] = results_df[group_tracks].mean(axis=1)
results_df["group_%s_stdev" % group] = results_df[group_tracks].std(axis=1)
return results_df
class SleuthAllGenes(IsoformTracker):
pattern = ""
table = ""
def __call__(self, track, slice=None):
statement = "SELECT * FROM all_gene_expression_%(table)s_%(track)s"
return self.getDataFrame(statement)
class SleuthCountsAllGenes(SleuthAllGenes):
pattern = "all_gene_expression_counts_(.*)"
table = "counts"
class SleuthTpmAllGenes(SleuthAllGenes):
pattern = "all_gene_expression_tpm_(.*)"
table = "tpm"
class Deseq2Results(IsoformTracker):
pattern = "(.*)_deseq2_DE_results$"
direction = ""
where = "WHERE p_value NOT NULL"
def __call__(self, track, slice=None):
statement = '''
SELECT
gene_name, gene_id,
control_mean AS expression,
fold, l2fold,
p_value, p_value_adj,
significant
FROM %(track)s_deseq2_DE_results
WHERE p_value NOT NULL
ORDER BY significant DESC, l2fold ASC
'''
return self.getAll(statement)
class Deseq2ResultsSig(Deseq2Results):
pattern = "(.*)_deseq2_DE_results$"
direction = ""
where = "WHERE p_value NOT NULL AND significant == 1 AND ABS(l2fold) > 1"
| mit |
rahul-c1/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 276 | 3790 | # Authors: Lars Buitinck <[email protected]>
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
rhiever/tpot | tpot/config/classifier_sparse.py | 3 | 3726 | # -*- coding: utf-8 -*-
"""This file is part of the TPOT library.
TPOT was primarily developed at the University of Pennsylvania by:
- Randal S. Olson ([email protected])
- Weixuan Fu ([email protected])
- Daniel Angell ([email protected])
- and many more generous open source contributors
TPOT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
TPOT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with TPOT. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
classifier_config_sparse = {
'tpot.builtins.OneHotEncoder': {
'minimum_fraction': [0.05, 0.1, 0.15, 0.2, 0.25]
},
'sklearn.neighbors.KNeighborsClassifier': {
'n_neighbors': range(1, 101),
'weights': ["uniform", "distance"],
'p': [1, 2]
},
'sklearn.ensemble.RandomForestClassifier': {
'n_estimators': [100],
'criterion': ["gini", "entropy"],
'max_features': np.arange(0.05, 1.01, 0.05),
'min_samples_split': range(2, 21),
'min_samples_leaf': range(1, 21),
'bootstrap': [True, False]
},
'sklearn.feature_selection.SelectFwe': {
'alpha': np.arange(0, 0.05, 0.001),
'score_func': {
'sklearn.feature_selection.f_classif': None
}
},
'sklearn.feature_selection.SelectPercentile': {
'percentile': range(1, 100),
'score_func': {
'sklearn.feature_selection.f_classif': None
}
},
'sklearn.feature_selection.VarianceThreshold': {
'threshold': np.arange(0.05, 1.01, 0.05)
},
'sklearn.feature_selection.RFE': {
'step': np.arange(0.05, 1.01, 0.05),
'estimator': {
'sklearn.ensemble.ExtraTreesClassifier': {
'n_estimators': [100],
'criterion': ['gini', 'entropy'],
'max_features': np.arange(0.05, 1.01, 0.05)
}
}
},
'sklearn.feature_selection.SelectFromModel': {
'threshold': np.arange(0, 1.01, 0.05),
'estimator': {
'sklearn.ensemble.ExtraTreesClassifier': {
'n_estimators': [100],
'criterion': ['gini', 'entropy'],
'max_features': np.arange(0.05, 1.01, 0.05)
}
}
},
'sklearn.linear_model.LogisticRegression': {
'penalty': ["l1", "l2"],
'C': [1e-4, 1e-3, 1e-2, 1e-1, 0.5, 1., 5., 10., 15., 20., 25.],
'dual': [True, False]
},
'sklearn.naive_bayes.BernoulliNB': {
'alpha': [1e-3, 1e-2, 1e-1, 1., 10., 100.],
'fit_prior': [True, False]
},
'sklearn.naive_bayes.MultinomialNB': {
'alpha': [1e-3, 1e-2, 1e-1, 1., 10., 100.],
'fit_prior': [True, False]
},
'sklearn.svm.LinearSVC': {
'penalty': ["l1", "l2"],
'loss': ["hinge", "squared_hinge"],
'dual': [True, False],
'tol': [1e-5, 1e-4, 1e-3, 1e-2, 1e-1],
'C': [1e-4, 1e-3, 1e-2, 1e-1, 0.5, 1., 5., 10., 15., 20., 25.]
},
'xgboost.XGBClassifier': {
'n_estimators': [100],
'max_depth': range(1, 11),
'learning_rate': [1e-3, 1e-2, 1e-1, 0.5, 1.],
'subsample': np.arange(0.05, 1.01, 0.05),
'min_child_weight': range(1, 21),
'nthread': [1]
}
}
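# Illustrative usage sketch (not exported by this module; X_train/y_train are
# placeholder names): the dictionary above is meant to be passed to
# TPOTClassifier through its `config_dict` argument so that only
# sparse-input-friendly operators are searched, e.g.:
#
#     from tpot import TPOTClassifier
#     from scipy.sparse import csr_matrix
#     tpot = TPOTClassifier(config_dict=classifier_config_sparse,
#                           generations=5, population_size=20, verbosity=2)
#     tpot.fit(csr_matrix(X_train), y_train)
#
# TPOT also accepts the string 'TPOT sparse' to select this built-in
# configuration by name.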
| lgpl-3.0 |
pierrepo/PBxplore | pbxplore/scripts/PBstat.py | 2 | 7591 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Statistical analysis and graphical representations of PBs.
Compute Neq, PBs distribution and draw logo representation of PBs.
2013 - P. Poulain, A. G. de Brevern
"""
# ============================================================================
# Modules
# ============================================================================
# Use print as a function for python 3 compatibility
# Standard modules
import os
import sys
import argparse
# Local module
import pbxplore as pbx
# Weblogolib is an optional requirement
try:
import weblogo
except ImportError:
IS_WEBLOGO = False
else:
IS_WEBLOGO = True
import matplotlib
# ============================================================================
# Python2/Python3 compatibility
# ============================================================================
# The range function in python 3 behaves as the range function in python 2
# and returns a generator rather than a list. To produce a list in python 3,
# one should use list(range). Here we change range to behave the same in
# python 2 and in python 3. In both cases, range will return a generator.
try:
range = xrange
except NameError:
pass
def user_inputs():
"""
Handle the user parameters for PBstat.py.
Returns
-------
options : the parsed arguments as parsed by `argparse`.
"""
parser = argparse.ArgumentParser(
description="Statistical analysis and graphical representations of PBs.")
# mandatory arguments
parser.add_argument("-f", action="store", required=True,
help="name of file that contains PBs frequency (count)")
parser.add_argument("-o", action="store", required=True,
help="name for results")
# optional arguments
parser.add_argument("--map", action="store_true", default=False, dest="mapdist",
help="generate map of the distribution of PBs along protein sequence")
parser.add_argument("--neq", action="store_true", default=False, dest="neq",
help="compute Neq and generate Neq plot along protein sequence")
parser.add_argument("--logo", action="store_true", default=False, dest="logo",
help="generate logo representation of PBs frequency along protein sequence")
parser.add_argument("--image-format", action='store', type=str,
dest='image_format', default='png',
choices=['pdf', 'png', 'jpg'],
help='File format for all image output.')
parser.add_argument("--residue-min", action="store", type=int,
dest="residue_min", help="defines lower bound of residue frame")
parser.add_argument("--residue-max", action="store", type=int,
dest="residue_max", help="defines upper bound of residue frame")
parser.add_argument('-v', '--version', action='version',
version='%(prog)s {}'.format(pbx.__version__))
# get all parameters
options = parser.parse_args()
# check file
if not os.path.isfile(options.f):
parser.error("{0}: not a valid file".format(options.f))
# Check residues min/max
residues = [options.residue_min, options.residue_max]
for residue in residues:
if residue is not None and residue < 0:
parser.error("residue argument must be >=0")
if None not in residues and options.residue_min >= options.residue_max:
parser.error("residue-min must be < residue-max.")
# Check weblogo
if options.logo:
if not IS_WEBLOGO:
parser.error("Weblogo is not installed; cannot generate the logo image.")
return options
def check_residue_range(residues, residue_min, residue_max):
""""
Ensure that the lower bound and the upper bound parameters are in the range of
the list `residues`.
Parameters
----------
residues : list
the list of residues indexes
residue_min:
the lower bound of residue
residue_max:
the upper bound of residue
Exceptions
----------
IndexError : if `residue_min` or `residue_max` is not in the range
"""
if residue_min is None:
residue_min = residues[0]
if residue_max is None:
residue_max = residues[-1]
if residue_min not in residues:
raise IndexError("residue_min does not belong to the residue range")
if residue_max not in residues:
raise IndexError("residue_max does not belong to the residue range")
if residue_min >= residue_max:
raise IndexError("Lower bound > upper bound")
return residue_min, residue_max
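# For example, with residues = [1, 2, 3, 4]:
#   check_residue_range(residues, 2, 4)       -> (2, 4)
#   check_residue_range(residues, None, None) -> (1, 4)
#   check_residue_range(residues, 4, 2)       raises IndexError ("Lower bound > upper bound")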
def pbstat_cli():
"""
PBstat command line.
"""
options = user_inputs()
try:
count, residues = pbx.analysis.read_occurence_file(options.f)
# Handle the index of the first residue in the matrix
idx_first_residue = residues[0]
print("Index of first residue in {0} is {1}".format(options.f, idx_first_residue))
residue_min, residue_max = check_residue_range(residues,
options.residue_min, options.residue_max)
except Exception as e:
sys.exit("ERROR: {0}".format(e))
print("First residue in the output file(s) is {0}".format(residue_min))
# Handle output file name...
output_file_name = options.o + ".PB.{0}"
if options.residue_min or options.residue_max:
output_file_name = "{0}.{1}-{2}".format(output_file_name, residue_min, residue_max)
# ... and figure name
output_fig_name = output_file_name + "." + options.image_format
# -------------------------------------------------------------------------------
# generates map of the distribution of PBs along protein sequence
# -------------------------------------------------------------------------------
if options.mapdist:
file_fig_name = output_fig_name.format("map")
pbx.analysis.plot_map(file_fig_name, count, idx_first_residue, residue_min, residue_max)
print("wrote " + file_fig_name)
# -------------------------------------------------------------------------------
# computes Neq and generates neq plot along protein sequence
# -------------------------------------------------------------------------------
if options.neq:
# compute Neq
neq = pbx.analysis.compute_neq(count)
# write Neq
neq_file_name = output_file_name.format("Neq")
with open(neq_file_name, "w") as outfile:
pbx.io.write_neq(outfile, neq, idx_first_residue, residue_min, residue_max)
print("wrote {0}".format(neq_file_name))
# draw Neq
file_fig_name = output_fig_name.format("Neq")
pbx.analysis.plot_neq(file_fig_name, neq, idx_first_residue, residue_min, residue_max)
print("wrote {}".format(file_fig_name))
# -------------------------------------------------------------------------------
# generates logo representation of PBs frequency along protein sequence
# -------------------------------------------------------------------------------
if options.logo:
file_fig_name = output_fig_name.format("logo")
title = options.f.replace(".PB.count", "")
pbx.analysis.generate_weblogo(file_fig_name, count,
idx_first_residue, residue_min, residue_max, title)
print("wrote {}".format(file_fig_name))
if __name__ == '__main__':
pbstat_cli()
| mit |
jessefeinman/FintechHackathon | python-getting-started/nlp/classificationTools.py | 1 | 2927 | import nltk
import os
from random import shuffle
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from datetime import datetime
from nltk import classify, NaiveBayesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
import pickle
def listOfFiles(flagged):
files = os.listdir(os.getcwd() + "/emailNames" + flagged)
listToReturn = []
for file in files:
with open("emailNames" + flagged + "/" + file, 'r') as names:
            listToReturn.append(([word[:-1].lower() for word in names], flagged))
return listToReturn
documents = listOfFiles("Flagged") + listOfFiles("NotFlagged")
shuffle(documents)
all_words = []
for document in documents:
all_words.extend(document[0])
word_features = nltk.FreqDist(all_words)
def find_features(document):
words = set(document)
features = {}
for w in word_features:
features[w] = (w in words)
return features
featuresets = [(find_features(rev), category) for (rev, category) in documents]
try:
d = SklearnClassifier(MultinomialNB())
d.train(featuresets[:300])
print(classify.accuracy(d, featuresets[300:]))
except:
print("d")
try:
a = NaiveBayesClassifier.train(featuresets[:300])
print(classify.accuracy(a, featuresets[300:]))
except:
print("a")
try:
e = SklearnClassifier(LinearSVC())
e.train(featuresets[:300])
print(classify.accuracy(e, featuresets[300:]))
except:
print("e")
try:
f = SklearnClassifier(SVC(), sparse=False)
f.train(featuresets[:300])
print(classify.accuracy(f, featuresets[300:]))
except:
print("f")
try:
g = SklearnClassifier(LinearSVC())
g.train(featuresets[:300])
print(classify.accuracy(g, featuresets[300:]))
except:
print("g")
try:
h = nltk.classify.DecisionTreeClassifier.train(featuresets[:300], entropy_cutoff=0, support_cutoff=0)
print(classify.accuracy(h, featuresets[300:]))
except:
print("h")
def saveClassifier(classifier):
    pickleClassifier = open(type(classifier).__name__ + datetime.now().strftime('%H:%M:%S') + ".pickle", "wb")
pickle.dump(classifier, pickleClassifier)
pickleClassifier.close()
return classifier
def loadClassifier(name):
pickledClassifier = open(name, "rb")
classifier = pickle.load(pickledClassifier)
pickledClassifier.close()
return classifier
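# Illustrative round-trip (the pickle filename is hypothetical; the real name
# embeds the classifier type and the current time):
#   saved = saveClassifier(d)
#   clf = loadClassifier("SklearnClassifier14:05:10.pickle")
#   print(classify.accuracy(clf, featuresets[300:]))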
| bsd-2-clause |
savoirfairelinux/opendht | python/tools/dht/tests.py | 1 | 34972 | # -*- coding: utf-8 -*-
# Copyright (C) 2015-2019 Savoir-Faire Linux Inc.
# Author(s): Adrien Béraud <[email protected]>
# Simon Désaulniers <[email protected]>
import sys
import os
import threading
import random
import string
import time
import subprocess
import re
import traceback
import collections
from matplotlib.ticker import FuncFormatter
import math
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
from networkx.drawing.nx_agraph import graphviz_layout
from opendht import *
from dht.network import DhtNetwork, DhtNetworkSubProcess
############
# Common #
############
# matplotlib display format for bits (b, Kb, Mb)
bit_format = None
Kbit_format = FuncFormatter(lambda x, pos: '%1.1f' % (x*1024**-1) + 'Kb')
Mbit_format = FuncFormatter(lambda x, pos: '%1.1f' % (x*1024**-2) + 'Mb')
def random_str_val(size=1024):
"""Creates a random string value of specified size.
@param size: Size, in bytes, of the value.
@type size: int
@return: Random string value
@rtype : str
"""
return ''.join(random.choice(string.hexdigits) for _ in range(size))
def random_hash():
"""Creates random InfoHash.
"""
return InfoHash(random_str_val(size=40).encode())
def timer(f, *args):
"""
    Measure the time taken to execute function f.
    @param f : Function to time
    @type f : function
    @param args : Arguments of the function f
    @type args : list
    @rtype : float
    @return : Time, in seconds, taken by the function f
"""
start = time.time()
f(*args)
return time.time() - start
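# e.g. elapsed = timer(time.sleep, 0.5)  # returns roughly 0.5 (seconds, as a float)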
def reset_before_test(featureTestMethod):
"""
This is a decorator for all test methods needing reset().
@param featureTestMethod: The method to be decorated. All decorated methods
must have 'self' object as first arg.
@type featureTestMethod: function
"""
def call(*args, **kwargs):
self = args[0]
if isinstance(self, FeatureTest):
self._reset()
return featureTestMethod(*args, **kwargs)
return call
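# Typical usage: decorate a FeatureTest method so the shared test state is
# reset first, e.g.
#     @reset_before_test
#     def _someTest(self): ...
# as done by the test classes below.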
def display_plot(yvals, xvals=None, yformatter=None, display_time=3, **kwargs):
"""
Displays a plot of data in interactive mode. This method is made to be
called successively for plot refreshing.
@param yvals: Ordinate values (float).
@type yvals: list
@param xvals: Abscissa values (float).
@type xvals: list
@param yformatter: The matplotlib FuncFormatter to use for y values.
@type yformatter: matplotlib.ticker.FuncFormatter
    @param display_time: The time matplotlib can take to refresh the plot.
    @type display_time: int
"""
plt.ion()
plt.clf()
plt.show()
if yformatter:
        plt.axes().yaxis.set_major_formatter(yformatter)
if xvals:
plt.plot(xvals, yvals, **kwargs)
else:
plt.plot(yvals, **kwargs)
plt.pause(display_time)
def display_traffic_plot(ifname):
"""Displays the traffic plot for a given interface name.
@param ifname: Interface name.
@type ifname: string
"""
ydata = []
xdata = []
# warning: infinite loop
interval = 2
for rate in iftop_traffic_data(ifname, interval=interval):
ydata.append(rate)
xdata.append((xdata[-1] if len(xdata) > 0 else 0) + interval)
display_plot(ydata, xvals=xdata, yformatter=Kbit_format, color='blue')
def iftop_traffic_data(ifname, interval=2, rate_type='send_receive'):
"""
Generator (yields data) function collecting traffic data from iftop
subprocess.
@param ifname: Interface to listen to.
@type ifname: string
    @param interval: Interval of time between two data collections. Possible
                     values are 2, 10 or 40.
    @type interval: int
    @param rate_type: (default: "send_receive") Whether to pick "send",
                      "receive" or "send and receive" rates. Possible values:
                      "send", "receive" and "send_receive".
    @type rate_type: string
"""
# iftop stdout string format
SEND_RATE_STR = "Total send rate"
RECEIVE_RATE_STR = "Total receive rate"
SEND_RECEIVE_RATE_STR = "Total send and receive rate"
RATE_STR = {
"send" : SEND_RATE_STR,
"receive" : RECEIVE_RATE_STR,
"send_receive" : SEND_RECEIVE_RATE_STR
}
TWO_SECONDS_RATE_COL = 0
TEN_SECONDS_RATE_COL = 1
FOURTY_SECONDS_RATE_COL = 2
COLS = {
2 : TWO_SECONDS_RATE_COL,
10 : TEN_SECONDS_RATE_COL,
40 : FOURTY_SECONDS_RATE_COL
}
FLOAT_REGEX = "[0-9]+[.]*[0-9]*"
BIT_REGEX = "[KM]*b"
iftop = subprocess.Popen(["iftop", "-i", ifname, "-t"], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
while True:
line = iftop.stdout.readline().decode()
if RATE_STR[rate_type] in line:
rate, unit = re.findall("("+FLOAT_REGEX+")("+BIT_REGEX+")", line)[COLS[interval]]
rate = float(rate)
if unit == "Kb":
rate *= 1024
elif unit == "Mb":
rate *= 1024**2
yield rate
###########
# Tests #
###########
class FeatureTest(object):
"""
This is a base test.
"""
done = 0
lock = None
def __init__(self, test, workbench):
"""
@param test: The test string indicating the test to run. This string is
determined in the child classes.
@type test: string
@param workbench: A WorkBench object to use inside this test.
@type workbench: WorkBench
"""
self._test = test
self._workbench = workbench
self._bootstrap = self._workbench.get_bootstrap()
def _reset(self):
"""
Resets some static variables.
        This method is most likely going to be called before each test.
"""
FeatureTest.done = 0
FeatureTest.lock = threading.Condition()
def run(self):
raise NotImplementedError('This method must be implemented.')
##################################
# PHT #
##################################
class PhtTest(FeatureTest):
"""TODO
"""
indexEntries = None
prefix = None
key = None
def __init__(self, test, workbench, opts):
"""
@param test: is one of the following:
- 'insert': indexes a considerable amount of data in
the PHT structure.
TODO
@type test: string
        @param opts: Dictionary containing options for the test. Allowed
options are:
- 'num_keys': this specifies the number of keys to insert
in the PHT during the test.
@type opts: dict
"""
super(PhtTest, self).__init__(test, workbench)
self._num_keys = opts['num_keys'] if 'num_keys' in opts else 32
self._timer = True if 'timer' in opts else False
def _reset(self):
super(PhtTest, self)._reset()
PhtTest.indexEntries = []
@staticmethod
def lookupCb(vals, prefix):
PhtTest.indexEntries = list(vals)
PhtTest.prefix = prefix.decode()
DhtNetwork.log('Index name: <todo>')
DhtNetwork.log('Leaf prefix:', prefix)
for v in vals:
DhtNetwork.log('[ENTRY]:', v)
@staticmethod
def lookupDoneCb(ok):
DhtNetwork.log('[LOOKUP]:', PhtTest.key, "--", "success!" if ok else "Fail...")
with FeatureTest.lock:
FeatureTest.lock.notify()
@staticmethod
def insertDoneCb(ok):
DhtNetwork.log('[INSERT]:', PhtTest.key, "--", "success!" if ok else "Fail...")
with FeatureTest.lock:
FeatureTest.lock.notify()
@staticmethod
def drawTrie(trie_dict):
"""
        Draws the trie structure of the PHT from a dictionary.
        @param trie_dict: Dictionary of index entries (prefix -> entry).
@type trie_dict: dict
"""
prefixes = list(trie_dict.keys())
if len(prefixes) == 0:
return
edges = list([])
for prefix in prefixes:
for i in range(-1, len(prefix)-1):
u = prefix[:i+1]
x = ("." if i == -1 else u, u+"0")
y = ("." if i == -1 else u, u+"1")
if x not in edges:
edges.append(x)
if y not in edges:
edges.append(y)
# TODO: use a binary tree position layout...
# UPDATE : In a better way [change lib]
G = nx.Graph(sorted(edges, key=lambda x: len(x[0])))
plt.title("PHT: Tree")
pos=graphviz_layout(G,prog='dot')
nx.draw(G, pos, with_labels=True, node_color='white')
plt.show()
def run(self):
try:
if self._test == 'insert':
self._insertTest()
except Exception as e:
print(e)
finally:
self._bootstrap.resize(1)
###########
# Tests #
###########
@reset_before_test
def _insertTest(self):
"""TODO: Docstring for _massIndexTest.
"""
bootstrap = self._bootstrap
bootstrap.resize(2)
dht = bootstrap.get(1)
NUM_DIG = max(math.log(self._num_keys, 2)/4, 5) # at least 5 digit keys.
keyspec = collections.OrderedDict([('foo', NUM_DIG)])
pht = Pht(b'foo_index', keyspec, dht)
DhtNetwork.log('PHT has',
pht.MAX_NODE_ENTRY_COUNT,
'node'+ ('s' if pht.MAX_NODE_ENTRY_COUNT > 1 else ''),
'per leaf bucket.')
keys = [{
[_ for _ in keyspec.keys()][0] :
''.join(random.SystemRandom().choice(string.hexdigits)
for _ in range(NUM_DIG)).encode()
} for n in range(self._num_keys)]
all_entries = {}
# Index all entries.
for key in keys:
PhtTest.key = key
with FeatureTest.lock:
time_taken = timer(pht.insert, key, IndexValue(random_hash()), PhtTest.insertDoneCb)
if self._timer:
DhtNetwork.log('This insert step took : ', time_taken, 'second')
FeatureTest.lock.wait()
time.sleep(1)
# Recover entries now that the trie is complete.
for key in keys:
PhtTest.key = key
with FeatureTest.lock:
time_taken = timer(pht.lookup, key, PhtTest.lookupCb, PhtTest.lookupDoneCb)
if self._timer:
DhtNetwork.log('This lookup step took : ', time_taken, 'second')
FeatureTest.lock.wait()
all_entries[PhtTest.prefix] = [e.__str__()
for e in PhtTest.indexEntries]
for p in all_entries.keys():
DhtNetwork.log('All entries under prefix', p, ':')
DhtNetwork.log(all_entries[p])
PhtTest.drawTrie(all_entries)
##################################
# DHT #
##################################
class DhtFeatureTest(FeatureTest):
"""
This is a base dht test.
"""
#static variables used by class callbacks
successfullTransfer = lambda lv,fv: len(lv) == len(fv)
foreignNodes = None
foreignValues = None
def __init__(self, test, workbench):
super(DhtFeatureTest, self).__init__(test, workbench)
def _reset(self):
super(DhtFeatureTest, self)._reset()
DhtFeatureTest.foreignNodes = []
DhtFeatureTest.foreignValues = []
@staticmethod
def getcb(value):
vstr = value.__str__()[:100]
DhtNetwork.Log.log('[GET]: %s' % vstr + ("..." if len(vstr) > 100 else ""))
DhtFeatureTest.foreignValues.append(value)
return True
@staticmethod
def putDoneCb(ok, nodes):
with FeatureTest.lock:
if not ok:
DhtNetwork.Log.log("[PUT]: failed!")
FeatureTest.done -= 1
FeatureTest.lock.notify()
@staticmethod
def getDoneCb(ok, nodes):
with FeatureTest.lock:
if not ok:
DhtNetwork.Log.log("[GET]: failed!")
else:
for node in nodes:
if not node.getNode().isExpired():
DhtFeatureTest.foreignNodes.append(node.getId().toString())
FeatureTest.done -= 1
FeatureTest.lock.notify()
def _dhtPut(self, producer, _hash, *values):
with FeatureTest.lock:
for val in values:
vstr = val.__str__()[:100]
DhtNetwork.Log.log('[PUT]:', _hash.toString(), '->', vstr + ("..." if len(vstr) > 100 else ""))
FeatureTest.done += 1
producer.put(_hash, val, DhtFeatureTest.putDoneCb)
while FeatureTest.done > 0:
FeatureTest.lock.wait()
def _dhtGet(self, consumer, _hash):
DhtFeatureTest.foreignValues = []
DhtFeatureTest.foreignNodes = []
with FeatureTest.lock:
FeatureTest.done += 1
DhtNetwork.Log.log('[GET]:', _hash.toString())
consumer.get(_hash, DhtFeatureTest.getcb, DhtFeatureTest.getDoneCb)
while FeatureTest.done > 0:
FeatureTest.lock.wait()
def _gottaGetThemAllPokeNodes(self, consumer, hashes, nodes=None):
for h in hashes:
self._dhtGet(consumer, h)
if nodes is not None:
for n in DhtFeatureTest.foreignNodes:
nodes.add(n)
class PersistenceTest(DhtFeatureTest):
"""
This tests persistence of data on the network.
"""
def __init__(self, test, workbench, opts):
"""
@param test: is one of the following:
- 'mult_time': test persistence of data based on internal
OpenDHT storage maintenance timings.
- 'delete': test persistence of data upon deletion of
nodes.
- 'replace': replacing cluster successively.
@type test: string
OPTIONS
- dump_str_log: Enables storage log at test ending.
        - keep_alive: Keeps the test running indefinitely. This may be useful
to manually analyse the network traffic during a longer
period.
- num_producers: Number of producers of data during a DHT test.
- num_values: Number of values to initialize the DHT with.
"""
# opts
super(PersistenceTest, self).__init__(test, workbench)
self._traffic_plot = True if 'traffic_plot' in opts else False
self._dump_storage = True if 'dump_str_log' in opts else False
self._op_plot = True if 'op_plot' in opts else False
self._keep_alive = True if 'keep_alive' in opts else False
self._num_producers = opts['num_producers'] if 'num_producers' in opts else None
self._num_values = opts['num_values'] if 'num_values' in opts else None
def _trigger_dp(self, trigger_nodes, _hash, count=1):
"""
        Triggers the data persistence over time. In order to do this, `count` nodes
are created with an id around the hash of a value.
@param trigger_nodes: List of created nodes. The nodes created in this
function are append to this list.
@type trigger_nodes: list
@param _hash: Is the id of the value around which creating nodes.
@type _hash: InfoHash
@param count: The number of nodes to create with id around the id of
value.
@type count: int
"""
_hash_str = _hash.toString().decode()
_hash_int = int(_hash_str, 16)
for i in range(int(-count/2), int(count/2)+1):
_hash_str = '{:40x}'.format(_hash_int + i)
config = DhtConfig()
config.setNodeId(InfoHash(_hash_str.encode()))
n = DhtRunner()
n.run(config=config)
n.bootstrap(self._bootstrap.ip4,
str(self._bootstrap.port))
            DhtNetwork.log('Node', '['+_hash_str+']',
                           'started around ' + _hash.toString().decode()
                           if n.isRunning() else
                           'failed to start...')
trigger_nodes.append(n)
def _result(self, local_values, new_nodes):
bootstrap = self._bootstrap
if not DhtFeatureTest.successfullTransfer(local_values, DhtFeatureTest.foreignValues):
            DhtNetwork.Log.log('[GET]: Only %s out of %s values persisted.' %
(len(DhtFeatureTest.foreignValues), len(local_values)))
else:
DhtNetwork.Log.log('[GET]: All values successfully persisted.')
if DhtFeatureTest.foreignValues:
if new_nodes:
DhtNetwork.Log.log('Values are newly found on:')
for node in new_nodes:
DhtNetwork.Log.log(node)
if self._dump_storage:
DhtNetwork.Log.log('Dumping all storage log from '\
'hosting nodes.')
for proc in self._workbench.procs:
proc.sendClusterRequest(DhtNetworkSubProcess.DUMP_STORAGE_REQ, DhtFeatureTest.foreignNodes)
else:
DhtNetwork.Log.log("Values didn't reach new hosting nodes after shutdown.")
def run(self):
try:
if self._test == 'normal':
self._totallyNormalTest()
elif self._test == 'delete':
self._deleteTest()
elif self._test == 'replace':
self._replaceClusterTest()
elif self._test == 'mult_time':
self._multTimeTest()
else:
raise NameError("This test is not defined '" + self._test + "'")
except Exception as e:
traceback.print_tb(e.__traceback__)
print(type(e).__name__+':', e, file=sys.stderr)
finally:
if self._traffic_plot or self._op_plot:
plot_fname = "traffic-plot"
print('plot saved to', plot_fname)
plt.savefig(plot_fname)
self._bootstrap.resize(1)
###########
# Tests #
###########
@reset_before_test
def _totallyNormalTest(self):
"""
Reproduces a network in a realistic state.
"""
trigger_nodes = []
wb = self._workbench
bootstrap = self._bootstrap
# Value representing an ICE packet. Each ICE packet is around 1KB.
VALUE_SIZE = 1024
num_values_per_hash = self._num_values/wb.node_num if self._num_values else 5
# nodes and values counters
total_nr_values = 0
nr_nodes = wb.node_num
op_cv = threading.Condition()
        # target hashes for the put requests below (sent to clusters in string form).
hashes = [random_hash() for _ in range(wb.node_num)]
def normalBehavior(do, t):
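            # Run `do` forever, sleeping a random delay of at most `t` seconds
            # between two calls (executed from a daemon thread).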
nonlocal total_nr_values, op_cv
while True:
with op_cv:
do()
time.sleep(random.uniform(0.0, float(t)))
def putRequest():
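            # Put one random value of VALUE_SIZE bytes on a randomly chosen
            # target hash through a randomly chosen cluster process.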
nonlocal hashes, VALUE_SIZE, total_nr_values
lock = threading.Condition()
def dcb(success):
nonlocal total_nr_values, lock
if success:
total_nr_values += 1
DhtNetwork.Log.log("INFO: "+ str(total_nr_values)+" values put on the dht since begining")
with lock:
lock.notify()
with lock:
DhtNetwork.Log.warn("Random value put on the DHT...")
random.choice(wb.procs).sendClusterPutRequest(random.choice(hashes).toString(),
random_str_val(size=VALUE_SIZE).encode(),
done_cb=dcb)
lock.wait()
puts = threading.Thread(target=normalBehavior, args=(putRequest, 30.0/wb.node_num))
puts.daemon = True
puts.start()
def newNodeRequest():
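            # Ask a randomly chosen cluster to spawn one additional DHT node.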
nonlocal nr_nodes
lock = threading.Condition()
def dcb(success):
nonlocal nr_nodes, lock
nr_nodes += 1
DhtNetwork.Log.log("INFO: now "+str(nr_nodes)+" nodes on the dht")
with lock:
lock.notify()
with lock:
DhtNetwork.Log.warn("Node joining...")
random.choice(wb.procs).sendClusterRequest(DhtNetworkSubProcess.NEW_NODE_REQ, done_cb=dcb)
lock.wait()
connections = threading.Thread(target=normalBehavior, args=(newNodeRequest, 1*50.0/wb.node_num))
connections.daemon = True
connections.start()
def shutdownNodeRequest():
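            # Ask a randomly chosen cluster to shut down one of its DHT nodes.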
nonlocal nr_nodes
lock = threading.Condition()
def dcb(success):
nonlocal nr_nodes, lock
if success:
nr_nodes -= 1
DhtNetwork.Log.log("INFO: now "+str(nr_nodes)+" nodes on the dht")
else:
DhtNetwork.Log.err("Oops.. No node to shutodwn.")
with lock:
lock.notify()
with lock:
DhtNetwork.Log.warn("Node shutting down...")
random.choice(wb.procs).sendClusterRequest(DhtNetworkSubProcess.SHUTDOWN_NODE_REQ, done_cb=dcb)
lock.wait()
shutdowns = threading.Thread(target=normalBehavior, args=(shutdownNodeRequest, 1*60.0/wb.node_num))
shutdowns.daemon = True
shutdowns.start()
if self._traffic_plot:
display_traffic_plot('br'+wb.ifname)
else:
# blocks in matplotlib thread
while True:
plt.pause(3600)
@reset_before_test
def _deleteTest(self):
"""
        It uses the Dht shutdown call from the API to gracefully finish the nodes one
        after the other.
"""
bootstrap = self._bootstrap
ops_count = []
bootstrap.resize(3)
consumer = bootstrap.get(1)
producer = bootstrap.get(2)
myhash = random_hash()
local_values = [Value(b'foo'), Value(b'bar'), Value(b'foobar')]
self._dhtPut(producer, myhash, *local_values)
        # checking if values were transferred
self._dhtGet(consumer, myhash)
if not DhtFeatureTest.successfullTransfer(local_values, DhtFeatureTest.foreignValues):
if DhtFeatureTest.foreignValues:
DhtNetwork.Log.log('[GET]: Only ', len(DhtFeatureTest.foreignValues) ,' on ',
len(local_values), ' values successfully put.')
else:
DhtNetwork.Log.log('[GET]: 0 values successfully put')
if DhtFeatureTest.foreignValues and DhtFeatureTest.foreignNodes:
DhtNetwork.Log.log('Values are found on :')
for node in DhtFeatureTest.foreignNodes:
DhtNetwork.Log.log(node)
for _ in range(max(1, int(self._workbench.node_num/32))):
DhtNetwork.Log.log('Removing all nodes hosting target values...')
cluster_ops_count = 0
for proc in self._workbench.procs:
DhtNetwork.Log.log('[REMOVE]: sending shutdown request to', proc)
lock = threading.Condition()
def dcb(success):
nonlocal lock
if not success:
DhtNetwork.Log.err("Failed to shutdown.")
with lock:
lock.notify()
with lock:
proc.sendClusterRequest(
DhtNetworkSubProcess.SHUTDOWN_NODE_REQ,
DhtFeatureTest.foreignNodes,
done_cb=dcb
)
lock.wait()
DhtNetwork.Log.log('sending message stats request')
def msg_dcb(stats):
nonlocal cluster_ops_count, lock
if stats:
cluster_ops_count += sum(stats[1:])
with lock:
lock.notify()
with lock:
proc.sendGetMessageStats(done_cb=msg_dcb)
lock.wait()
DhtNetwork.Log.log("5 seconds wait...")
time.sleep(5)
ops_count.append(cluster_ops_count/self._workbench.node_num)
            # checking if values were transferred to new nodes
foreignNodes_before_delete = DhtFeatureTest.foreignNodes
DhtNetwork.Log.log('[GET]: trying to fetch persistent values')
self._dhtGet(consumer, myhash)
new_nodes = set(DhtFeatureTest.foreignNodes) - set(foreignNodes_before_delete)
self._result(local_values, new_nodes)
if self._op_plot:
display_plot(ops_count, color='blue')
else:
DhtNetwork.Log.log("[GET]: either couldn't fetch values or nodes hosting values...")
if traffic_plot_thread:
print("Traffic plot running for ever. Ctrl-c for stopping it.")
traffic_plot_thread.join()
@reset_before_test
def _replaceClusterTest(self):
"""
It replaces all clusters one after the other.
"""
clusters = 8
bootstrap = self._bootstrap
bootstrap.resize(3)
consumer = bootstrap.get(1)
producer = bootstrap.get(2)
myhash = random_hash()
local_values = [Value(b'foo'), Value(b'bar'), Value(b'foobar')]
self._dhtPut(producer, myhash, *local_values)
self._dhtGet(consumer, myhash)
initial_nodes = DhtFeatureTest.foreignNodes
DhtNetwork.Log.log('Replacing', clusters, 'random clusters successively...')
for n in range(clusters):
i = random.randint(0, len(self._workbench.procs)-1)
proc = self._workbench.procs[i]
DhtNetwork.Log.log('Replacing', proc)
proc.sendClusterRequest(DhtNetworkSubProcess.SHUTDOWN_CLUSTER_REQ)
self._workbench.stop_cluster(i)
self._workbench.start_cluster(i)
DhtNetwork.Log.log('[GET]: trying to fetch persistent values')
self._dhtGet(consumer, myhash)
new_nodes = set(DhtFeatureTest.foreignNodes) - set(initial_nodes)
self._result(local_values, new_nodes)
@reset_before_test
def _multTimeTest(self):
"""
        Multiple put() calls are made from multiple nodes to multiple hashes,
        after which a set of 8 nodes is created around each hash in order to
        enable storage maintenance on each node. Therefore, this test will wait 10
        minutes for the nodes to trigger storage maintenance.
"""
trigger_nodes = []
bootstrap = self._bootstrap
        N_PRODUCERS = self._num_producers if self._num_producers else 16  # default to 16 producers
DP_TIMEOUT = 1
hashes = []
# Generating considerable amount of values of size 1KB.
VALUE_SIZE = 1024
NUM_VALUES = self._num_values if self._num_values else 50
values = [Value(random_str_val(size=VALUE_SIZE).encode()) for _ in range(NUM_VALUES)]
bootstrap.resize(N_PRODUCERS+2)
consumer = bootstrap.get(N_PRODUCERS+1)
producers = (bootstrap.get(n) for n in range(1,N_PRODUCERS+1))
for p in producers:
hashes.append(random_hash())
self._dhtPut(p, hashes[-1], *values)
once = True
while self._keep_alive or once:
nodes = set([])
self._gottaGetThemAllPokeNodes(consumer, hashes, nodes=nodes)
DhtNetwork.Log.log("Values are found on:")
for n in nodes:
DhtNetwork.Log.log(n)
DhtNetwork.Log.log("Creating 8 nodes around all of these hashes...")
for _hash in hashes:
self._trigger_dp(trigger_nodes, _hash, count=8)
DhtNetwork.Log.log('Waiting', DP_TIMEOUT+1, 'minutes for normal storage maintenance.')
time.sleep((DP_TIMEOUT+1)*60)
DhtNetwork.Log.log('Deleting old nodes from previous search.')
for proc in self._workbench.procs:
DhtNetwork.Log.log('[REMOVE]: sending delete request to', proc)
proc.sendClusterRequest(
DhtNetworkSubProcess.REMOVE_NODE_REQ,
nodes)
# new consumer (fresh cache)
bootstrap.resize(N_PRODUCERS+1)
bootstrap.resize(N_PRODUCERS+2)
consumer = bootstrap.get(N_PRODUCERS+1)
nodes_after_time = set([])
self._gottaGetThemAllPokeNodes(consumer, hashes, nodes=nodes_after_time)
self._result(values, nodes_after_time - nodes)
once = False
class PerformanceTest(DhtFeatureTest):
"""
Tests for general performance of dht operations.
"""
def __init__(self, test, workbench, opts):
"""
@param test: is one of the following:
- 'gets': multiple get operations and statistical results.
- 'delete': perform multiple put() operations followed
                       by targeted deletion of nodes hosting the values. This is
                       done until half of the nodes on the network remain.
@type test: string
"""
super(PerformanceTest, self).__init__(test, workbench)
def run(self):
try:
if self._test == 'gets':
self._getsTimesTest()
elif self._test == 'delete':
self._delete()
else:
raise NameError("This test is not defined '" + self._test + "'")
except Exception as e:
traceback.print_tb(e.__traceback__)
print(type(e).__name__+':', e, file=sys.stderr)
finally:
self._bootstrap.resize(1)
###########
# Tests #
###########
@reset_before_test
def _getsTimesTest(self):
"""
        Tests for performance of the DHT doing multiple get() operations.
"""
bootstrap = self._bootstrap
plt.ion()
fig, axes = plt.subplots(2, 1)
fig.tight_layout()
lax = axes[0]
hax = axes[1]
lines = None#ax.plot([])
#plt.ylabel('time (s)')
hax.set_ylim(0, 2)
# let the network stabilise
plt.pause(20)
#start = time.time()
times = []
lock = threading.Condition()
done = 0
def getcb(v):
nonlocal bootstrap
DhtNetwork.Log.log("found", v)
return True
def donecb(ok, nodes, start):
nonlocal bootstrap, lock, done, times
t = time.time()-start
with lock:
if not ok:
DhtNetwork.Log.log("failed !")
times.append(t)
done -= 1
lock.notify()
def update_plot():
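            # Remove the previously drawn artists, then redraw the histogram of
            # get() latencies and the per-call timing curve.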
nonlocal lines
while lines:
l = lines.pop()
l.remove()
del l
if len(times) > 1:
n, bins, lines = hax.hist(times, 100, normed=1, histtype='stepfilled', color='g')
hax.set_ylim(min(n), max(n))
lines.extend(lax.plot(times, color='blue'))
plt.draw()
def run_get():
nonlocal done
done += 1
start = time.time()
bootstrap.front().get(InfoHash.getRandom(), getcb, lambda ok, nodes: donecb(ok, nodes, start))
plt.pause(5)
plt.show()
update_plot()
times = []
for n in range(10):
self._workbench.replace_cluster()
plt.pause(2)
DhtNetwork.Log.log("Getting 50 random hashes succesively.")
for i in range(50):
with lock:
for _ in range(1):
run_get()
while done > 0:
lock.wait()
update_plot()
plt.pause(.1)
update_plot()
print("Took", np.sum(times), "mean", np.mean(times), "std", np.std(times), "min", np.min(times), "max", np.max(times))
print('GET calls timings benchmark test : DONE. ' \
'Close Matplotlib window for terminating the program.')
plt.ioff()
plt.show()
@reset_before_test
def _delete(self):
"""
Tests for performance of get() and put() operations on the network while
deleting around the target hash.
"""
bootstrap = self._bootstrap
bootstrap.resize(3)
consumer = bootstrap.get(1)
producer = bootstrap.get(2)
myhash = random_hash()
local_values = [Value(b'foo'), Value(b'bar'), Value(b'foobar')]
for _ in range(max(1, int(self._workbench.node_num/32))):
self._dhtGet(consumer, myhash)
DhtNetwork.Log.log("Waiting 15 seconds...")
time.sleep(15)
self._dhtPut(producer, myhash, *local_values)
            # checking if values were transferred
self._dhtGet(consumer, myhash)
DhtNetwork.Log.log('Values are found on :')
for node in DhtFeatureTest.foreignNodes:
DhtNetwork.Log.log(node)
if not DhtFeatureTest.successfullTransfer(local_values, DhtFeatureTest.foreignValues):
if DhtFeatureTest.foreignValues:
DhtNetwork.Log.log('[GET]: Only ', len(DhtFeatureTest.foreignValues) ,' on ',
len(local_values), ' values successfully put.')
else:
DhtNetwork.Log.log('[GET]: 0 values successfully put')
DhtNetwork.Log.log('Removing all nodes hosting target values...')
for proc in self._workbench.procs:
DhtNetwork.Log.log('[REMOVE]: sending shutdown request to', proc)
proc.sendClusterRequest(
DhtNetworkSubProcess.SHUTDOWN_NODE_REQ,
DhtFeatureTest.foreignNodes
)
| gpl-3.0 |
cogeorg/black_rhino | examples/Georg2012/networkx/drawing/nx_pylab.py | 10 | 27884 | """
**********
Matplotlib
**********
Draw networks with matplotlib (pylab).
See Also
--------
matplotlib: http://matplotlib.sourceforge.net/
pygraphviz: http://networkx.lanl.gov/pygraphviz/
"""
# Copyright (C) 2004-2012 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.drawing.layout import shell_layout,\
circular_layout,spectral_layout,spring_layout,random_layout
__author__ = """Aric Hagberg ([email protected])"""
__all__ = ['draw',
'draw_networkx',
'draw_networkx_nodes',
'draw_networkx_edges',
'draw_networkx_labels',
'draw_networkx_edge_labels',
'draw_circular',
'draw_random',
'draw_spectral',
'draw_spring',
'draw_shell',
'draw_graphviz']
def draw(G, pos=None, ax=None, hold=None, **kwds):
"""Draw the graph G with Matplotlib (pylab).
Draw the graph as a simple representation with no node
labels or edge labels and using the full Matplotlib figure area
and no axis labels by default. See draw_networkx() for more
full-featured drawing that allows title, axis labels etc.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in specified Matplotlib axes.
hold : bool, optional
Set the Matplotlib hold state. If True subsequent draw
commands will be added to the current axes.
**kwds : optional keywords
See networkx.draw_networkx() for a description of optional keywords.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
See Also
--------
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
Notes
-----
This function has the same name as pylab.draw and pyplot.draw
so beware when using
>>> from networkx import *
since you might overwrite the pylab.draw function.
Good alternatives are:
With pylab:
>>> import pylab as P #
>>> import networkx as nx
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G) # networkx draw()
>>> P.draw() # pylab draw()
With pyplot
>>> import matplotlib.pyplot as plt
>>> import networkx as nx
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G) # networkx draw()
>>> plt.draw() # pyplot draw()
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
"""
try:
import matplotlib.pylab as pylab
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
cf=pylab.gcf()
cf.set_facecolor('w')
if ax is None:
if cf._axstack() is None:
ax=cf.add_axes((0,0,1,1))
else:
ax=cf.gca()
# allow callers to override the hold state by passing hold=True|False
b = pylab.ishold()
h = kwds.pop('hold', None)
if h is not None:
pylab.hold(h)
try:
draw_networkx(G,pos=pos,ax=ax,**kwds)
ax.set_axis_off()
pylab.draw_if_interactive()
except:
pylab.hold(b)
raise
pylab.hold(b)
return
def draw_networkx(G, pos=None, with_labels=True, **kwds):
"""Draw the graph G using Matplotlib.
Draw the graph with Matplotlib with options for node positions,
labeling, titles, and many other drawing features.
See draw() for simple drawing without labels or axes.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
with_labels : bool, optional (default=True)
Set to True to draw labels on the nodes.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional (default G.nodes())
Draw only specified nodes
edgelist : list, optional (default=G.edges())
Draw only specified edges
node_size : scalar or array, optional (default=300)
Size of nodes. If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats, (default='r')
Node color. Can be a single color format string,
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string, optional (default='o')
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8'.
alpha : float, optional (default=1.0)
The node transparency
cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of nodes
vmin,vmax : float, optional (default=None)
Minimum and maximum for node colormap scaling
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
width : float, optional (default=1.0)
Line width of edges
edge_color : color string, or array of floats (default='r')
Edge color. Can be a single color format string,
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
    edge_cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of edges
edge_vmin,edge_vmax : floats, optional (default=None)
Minimum and maximum for edge colormap scaling
    style : string, optional (default='solid')
Edge line style (solid|dashed|dotted,dashdot)
    labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int, optional (default=12)
Font size for text labels
font_color : string, optional (default='k' black)
Font color string
font_weight : string, optional (default='normal')
Font weight
font_family : string, optional (default='sans-serif')
Font family
label : string, optional
Label for graph legend
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
>>> import pylab
    >>> limits=pylab.axis('off') # turn off axis
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pylab as pylab
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if pos is None:
pos=nx.drawing.spring_layout(G) # default to spring layout
node_collection=draw_networkx_nodes(G, pos, **kwds)
edge_collection=draw_networkx_edges(G, pos, **kwds)
if with_labels:
draw_networkx_labels(G, pos, **kwds)
pylab.draw_if_interactive()
def draw_networkx_nodes(G, pos,
nodelist=None,
node_size=300,
node_color='r',
node_shape='o',
alpha=1.0,
cmap=None,
vmin=None,
vmax=None,
ax=None,
linewidths=None,
label = None,
**kwds):
"""Draw the nodes of the graph G.
This draws only the nodes of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional
Draw only specified nodes (default G.nodes())
node_size : scalar or array
Size of nodes (default=300). If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats
Node color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8' (default='o').
alpha : float
The node transparency (default=1.0)
cmap : Matplotlib colormap
Colormap for mapping intensities of nodes (default=None)
vmin,vmax : floats
Minimum and maximum for node colormap scaling (default=None)
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
label : [None| string]
Label for legend
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nodes=nx.draw_networkx_nodes(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pylab as pylab
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax=pylab.gca()
if nodelist is None:
nodelist=G.nodes()
if not nodelist or len(nodelist)==0: # empty nodelist, no drawing
return None
try:
xy=numpy.asarray([pos[v] for v in nodelist])
except KeyError as e:
raise nx.NetworkXError('Node %s has no position.'%e)
except ValueError:
raise nx.NetworkXError('Bad value in node positions.')
node_collection=ax.scatter(xy[:,0], xy[:,1],
s=node_size,
c=node_color,
marker=node_shape,
cmap=cmap,
vmin=vmin,
vmax=vmax,
alpha=alpha,
linewidths=linewidths,
label=label)
# pylab.axes(ax)
pylab.sci(node_collection)
node_collection.set_zorder(2)
return node_collection
def draw_networkx_edges(G, pos,
edgelist=None,
width=1.0,
edge_color='k',
style='solid',
alpha=None,
edge_cmap=None,
edge_vmin=None,
edge_vmax=None,
ax=None,
arrows=True,
label=None,
**kwds):
"""Draw the edges of the graph G.
This draws only the edges of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
edgelist : collection of edge tuples
Draw only specified edges(default=G.edges())
width : float
Line width of edges (default =1.0)
edge_color : color string, or array of floats
Edge color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
style : string
Edge line style (default='solid') (solid|dashed|dotted,dashdot)
alpha : float
The edge transparency (default=1.0)
    edge_cmap : Matplotlib colormap
Colormap for mapping intensities of edges (default=None)
edge_vmin,edge_vmax : floats
Minimum and maximum for edge colormap scaling (default=None)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
arrows : bool, optional (default=True)
For directed graphs, if True draw arrowheads.
label : [None| string]
Label for legend
Notes
-----
For directed graphs, "arrows" (actually just thicker stubs) are drawn
at the head end. Arrows can be turned off with keyword arrows=False.
Yes, it is ugly but drawing proper arrows with Matplotlib this
way is tricky.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edges=nx.draw_networkx_edges(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib
import matplotlib.pylab as pylab
import matplotlib.cbook as cb
from matplotlib.colors import colorConverter,Colormap
from matplotlib.collections import LineCollection
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax=pylab.gca()
if edgelist is None:
edgelist=G.edges()
if not edgelist or len(edgelist)==0: # no edges!
return None
# set edge positions
edge_pos=numpy.asarray([(pos[e[0]],pos[e[1]]) for e in edgelist])
if not cb.iterable(width):
lw = (width,)
else:
lw = width
if not cb.is_string_like(edge_color) \
and cb.iterable(edge_color) \
and len(edge_color)==len(edge_pos):
if numpy.alltrue([cb.is_string_like(c)
for c in edge_color]):
# (should check ALL elements)
# list of color letters such as ['k','r','k',...]
edge_colors = tuple([colorConverter.to_rgba(c,alpha)
for c in edge_color])
elif numpy.alltrue([not cb.is_string_like(c)
for c in edge_color]):
# If color specs are given as (rgb) or (rgba) tuples, we're OK
if numpy.alltrue([cb.iterable(c) and len(c) in (3,4)
for c in edge_color]):
edge_colors = tuple(edge_color)
else:
# numbers (which are going to be mapped with a colormap)
edge_colors = None
else:
raise ValueError('edge_color must consist of either color names or numbers')
else:
if cb.is_string_like(edge_color) or len(edge_color)==1:
edge_colors = ( colorConverter.to_rgba(edge_color, alpha), )
else:
            raise ValueError('edge_color must be a single color or list of exactly m colors where m is the number of edges')
edge_collection = LineCollection(edge_pos,
colors = edge_colors,
linewidths = lw,
antialiaseds = (1,),
linestyle = style,
transOffset = ax.transData,
)
edge_collection.set_zorder(1) # edges go behind nodes
edge_collection.set_label(label)
ax.add_collection(edge_collection)
# Note: there was a bug in mpl regarding the handling of alpha values for
# each line in a LineCollection. It was fixed in matplotlib in r7184 and
# r7189 (June 6 2009). We should then not set the alpha value globally,
# since the user can instead provide per-edge alphas now. Only set it
# globally if provided as a scalar.
if cb.is_numlike(alpha):
edge_collection.set_alpha(alpha)
if edge_colors is None:
if edge_cmap is not None:
assert(isinstance(edge_cmap, Colormap))
edge_collection.set_array(numpy.asarray(edge_color))
edge_collection.set_cmap(edge_cmap)
if edge_vmin is not None or edge_vmax is not None:
edge_collection.set_clim(edge_vmin, edge_vmax)
else:
edge_collection.autoscale()
pylab.sci(edge_collection)
arrow_collection=None
if G.is_directed() and arrows:
# a directed graph hack
# draw thick line segments at head end of edge
# waiting for someone else to implement arrows that will work
arrow_colors = edge_colors
a_pos=[]
p=1.0-0.25 # make head segment 25 percent of edge length
for src,dst in edge_pos:
x1,y1=src
x2,y2=dst
dx=x2-x1 # x offset
dy=y2-y1 # y offset
d=numpy.sqrt(float(dx**2+dy**2)) # length of edge
if d==0: # source and target at same position
continue
if dx==0: # vertical edge
xa=x2
ya=dy*p+y1
if dy==0: # horizontal edge
ya=y2
xa=dx*p+x1
else:
theta=numpy.arctan2(dy,dx)
xa=p*d*numpy.cos(theta)+x1
ya=p*d*numpy.sin(theta)+y1
a_pos.append(((xa,ya),(x2,y2)))
arrow_collection = LineCollection(a_pos,
colors = arrow_colors,
linewidths = [4*ww for ww in lw],
antialiaseds = (1,),
transOffset = ax.transData,
)
arrow_collection.set_zorder(1) # edges go behind nodes
arrow_collection.set_label(label)
ax.add_collection(arrow_collection)
# update view
minx = numpy.amin(numpy.ravel(edge_pos[:,:,0]))
maxx = numpy.amax(numpy.ravel(edge_pos[:,:,0]))
miny = numpy.amin(numpy.ravel(edge_pos[:,:,1]))
maxy = numpy.amax(numpy.ravel(edge_pos[:,:,1]))
w = maxx-minx
h = maxy-miny
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
ax.update_datalim( corners)
ax.autoscale_view()
# if arrow_collection:
return edge_collection
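# Illustrative usage sketch (not part of this module; assumes user code does
# `import networkx as nx` and `import matplotlib.pyplot as plt`): numeric
# values passed as `edge_color` are mapped through `edge_cmap`, e.g.
#   G = nx.path_graph(5)
#   pos = nx.spring_layout(G)
#   weights = [0.1, 0.5, 1.0, 2.0]  # one value per edge of the path graph
#   nx.draw_networkx_edges(G, pos, edge_color=weights,
#                          edge_cmap=plt.cm.Blues, width=2)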
def draw_networkx_labels(G, pos,
labels=None,
font_size=12,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
ax=None,
**kwds):
"""Draw node labels on the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
font_size : int
Font size for text labels (default=12)
font_color : string
Font color string (default='k' black)
font_family : string
Font family (default='sans-serif')
font_weight : string
Font weight (default='normal')
alpha : float
The text transparency (default=1.0)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> labels=nx.draw_networkx_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pylab as pylab
import matplotlib.cbook as cb
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax=pylab.gca()
if labels is None:
labels=dict( (n,n) for n in G.nodes())
# set optional alignment
horizontalalignment=kwds.get('horizontalalignment','center')
verticalalignment=kwds.get('verticalalignment','center')
text_items={} # there is no text collection so we'll fake one
for n, label in labels.items():
(x,y)=pos[n]
if not cb.is_string_like(label):
label=str(label) # this will cause "1" and 1 to be labeled the same
t=ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
transform = ax.transData,
clip_on=True,
)
text_items[n]=t
return text_items
def draw_networkx_edge_labels(G, pos,
edge_labels=None,
label_pos=0.5,
font_size=10,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
bbox=None,
ax=None,
rotate=True,
**kwds):
"""Draw edge labels.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
alpha : float
The text transparency (default=1.0)
edge_labels : dictionary
Edge labels in a dictionary keyed by edge two-tuple of text
labels (default=None). Only labels for the keys in the dictionary
are drawn.
label_pos : float
Position of edge label along edge (0=head, 0.5=center, 1=tail)
font_size : int
Font size for text labels (default=12)
font_color : string
Font color string (default='k' black)
font_weight : string
Font weight (default='normal')
font_family : string
Font family (default='sans-serif')
bbox : Matplotlib bbox
Specify text box shape and colors.
clip_on : bool
Turn on clipping at axis boundaries (default=True)
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edge_labels=nx.draw_networkx_edge_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
"""
try:
import matplotlib.pylab as pylab
import matplotlib.cbook as cb
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax=pylab.gca()
if edge_labels is None:
labels=dict( ((u,v), d) for u,v,d in G.edges(data=True) )
else:
labels = edge_labels
text_items={}
for (n1,n2), label in labels.items():
(x1,y1)=pos[n1]
(x2,y2)=pos[n2]
(x,y) = (x1 * label_pos + x2 * (1.0 - label_pos),
y1 * label_pos + y2 * (1.0 - label_pos))
if rotate:
angle=numpy.arctan2(y2-y1,x2-x1)/(2.0*numpy.pi)*360 # degrees
# make label orientation "right-side-up"
if angle > 90:
angle-=180
if angle < - 90:
angle+=180
# transform data coordinate angle to screen coordinate angle
xy=numpy.array((x,y))
trans_angle=ax.transData.transform_angles(numpy.array((angle,)),
xy.reshape((1,2)))[0]
else:
trans_angle=0.0
# use default box of white with white border
if bbox is None:
bbox = dict(boxstyle='round',
ec=(1.0, 1.0, 1.0),
fc=(1.0, 1.0, 1.0),
)
if not cb.is_string_like(label):
label=str(label) # this will cause "1" and 1 to be labeled the same
# set optional alignment
horizontalalignment=kwds.get('horizontalalignment','center')
verticalalignment=kwds.get('verticalalignment','center')
t=ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
rotation=trans_angle,
transform = ax.transData,
bbox = bbox,
zorder = 1,
clip_on=True,
)
text_items[(n1,n2)]=t
return text_items
def draw_circular(G, **kwargs):
"""Draw the graph G with a circular layout."""
draw(G,circular_layout(G),**kwargs)
def draw_random(G, **kwargs):
"""Draw the graph G with a random layout."""
draw(G,random_layout(G),**kwargs)
def draw_spectral(G, **kwargs):
"""Draw the graph G with a spectral layout."""
draw(G,spectral_layout(G),**kwargs)
def draw_spring(G, **kwargs):
"""Draw the graph G with a spring layout."""
draw(G,spring_layout(G),**kwargs)
def draw_shell(G, **kwargs):
"""Draw networkx graph with shell layout."""
nlist = kwargs.get('nlist', None)
    if nlist is not None:
        del kwargs['nlist']
draw(G,shell_layout(G,nlist=nlist),**kwargs)
def draw_graphviz(G, prog="neato", **kwargs):
"""Draw networkx graph with graphviz layout."""
pos=nx.drawing.graphviz_layout(G,prog)
draw(G,pos,**kwargs)
def draw_nx(G,pos,**kwds):
"""For backward compatibility; use draw or draw_networkx."""
draw(G,pos,**kwds)
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import matplotlib as mpl
mpl.use('PS',warn=False)
import pylab
except:
raise SkipTest("matplotlib not available")
| gpl-3.0 |
cl4rke/scikit-learn | sklearn/tests/test_lda.py | 71 | 5883 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import make_blobs
from sklearn import lda
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct values
# for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = lda.LDA(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = lda.LDA(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = lda.LDA(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = lda.LDA(solver="svd")
clf_lda_lsqr = lda.LDA(solver="lsqr")
clf_lda_eigen = lda.LDA(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = lda.LDA(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = lda.LDA(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = lda.LDA(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
| bsd-3-clause |
madelynfreed/rlundo | venv/lib/python2.7/site-packages/IPython/extensions/rmagic.py | 4 | 23016 | # -*- coding: utf-8 -*-
"""
======
Rmagic
======
Magic command interface for interactive work with R via rpy2
.. note::
The ``rpy2`` package needs to be installed separately. It
can be obtained using ``easy_install`` or ``pip``.
You will also need a working copy of R.
Usage
=====
To enable the magics below, execute ``%load_ext rmagic``.
``%R``
{R_DOC}
``%Rpush``
{RPUSH_DOC}
``%Rpull``
{RPULL_DOC}
``%Rget``
{RGET_DOC}
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (C) 2012 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
import tempfile
from glob import glob
from shutil import rmtree
import warnings
# numpy and rpy2 imports
import numpy as np
import rpy2.rinterface as ri
import rpy2.robjects as ro
try:
from rpy2.robjects import pandas2ri
pandas2ri.activate()
except ImportError:
pandas2ri = None
from rpy2.robjects import numpy2ri
numpy2ri.activate()
# IPython imports
from IPython.core.displaypub import publish_display_data
from IPython.core.magic import (Magics, magics_class, line_magic,
line_cell_magic, needs_local_scope)
from IPython.testing.skipdoctest import skip_doctest
from IPython.core.magic_arguments import (
argument, magic_arguments, parse_argstring
)
from IPython.external.simplegeneric import generic
from IPython.utils.py3compat import (str_to_unicode, unicode_to_str, PY3,
unicode_type)
from IPython.utils.text import dedent
class RInterpreterError(ri.RRuntimeError):
"""An error when running R code in a %%R magic cell."""
def __init__(self, line, err, stdout):
self.line = line
self.err = err.rstrip()
self.stdout = stdout.rstrip()
def __unicode__(self):
s = 'Failed to parse and evaluate line %r.\nR error message: %r' % \
(self.line, self.err)
if self.stdout and (self.stdout != self.err):
s += '\nR stdout:\n' + self.stdout
return s
if PY3:
__str__ = __unicode__
else:
def __str__(self):
return unicode_to_str(unicode(self), 'utf-8')
def Rconverter(Robj, dataframe=False):
"""
Convert an object in R's namespace to one suitable
for ipython's namespace.
For a data.frame, it tries to return a structured array.
It first checks for colnames, then names.
If all are NULL, it returns np.asarray(Robj), else
it tries to construct a recarray
Parameters
----------
Robj: an R object returned from rpy2
"""
is_data_frame = ro.r('is.data.frame')
colnames = ro.r('colnames')
rownames = ro.r('rownames') # with pandas, these could be used for the index
names = ro.r('names')
if dataframe:
as_data_frame = ro.r('as.data.frame')
cols = colnames(Robj)
_names = names(Robj)
if cols != ri.NULL:
Robj = as_data_frame(Robj)
names = tuple(np.array(cols))
elif _names != ri.NULL:
names = tuple(np.array(_names))
else: # failed to find names
return np.asarray(Robj)
Robj = np.rec.fromarrays(Robj, names = names)
return np.asarray(Robj)
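# Illustrative usage sketch (assumes an R data.frame named `df` already exists
# in the rpy2 namespace): Rconverter(ro.r('df'), dataframe=True) returns a
# structured (record) array whose fields are addressable by column name.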
@generic
def pyconverter(pyobj):
"""Convert Python objects to R objects. Add types using the decorator:
@pyconverter.when_type
"""
return pyobj
# The default conversion for lists seems to make them a nested list. That has
# some advantages, but is rarely convenient, so for interactive use, we convert
# lists to a numpy array, which becomes an R vector.
@pyconverter.when_type(list)
def pyconverter_list(pyobj):
return np.asarray(pyobj)
if pandas2ri is None:
# pandas2ri was new in rpy2 2.3.3, so for now we'll fallback to pandas'
# conversion function.
try:
from pandas import DataFrame
from pandas.rpy.common import convert_to_r_dataframe
@pyconverter.when_type(DataFrame)
def pyconverter_dataframe(pyobj):
return convert_to_r_dataframe(pyobj, strings_as_factors=True)
except ImportError:
pass
@magics_class
class RMagics(Magics):
"""A set of magics useful for interactive work with R via rpy2.
"""
def __init__(self, shell, Rconverter=Rconverter,
pyconverter=pyconverter,
cache_display_data=False):
"""
Parameters
----------
shell : IPython shell
Rconverter : callable
To be called on values taken from R before putting them in the
IPython namespace.
pyconverter : callable
To be called on values in ipython namespace before
assigning to variables in rpy2.
cache_display_data : bool
If True, the published results of the final call to R are
cached in the variable 'display_cache'.
"""
super(RMagics, self).__init__(shell)
self.cache_display_data = cache_display_data
self.r = ro.R()
self.Rstdout_cache = []
self.pyconverter = pyconverter
self.Rconverter = Rconverter
def eval(self, line):
'''
Parse and evaluate a line of R code with rpy2.
Returns the output to R's stdout() connection,
the value generated by evaluating the code, and a
boolean indicating whether the return value would be
visible if the line of code were evaluated in an R REPL.
R Code evaluation and visibility determination are
done via an R call of the form withVisible({<code>})
'''
old_writeconsole = ri.get_writeconsole()
ri.set_writeconsole(self.write_console)
try:
res = ro.r("withVisible({%s\n})" % line)
value = res[0] #value (R object)
visible = ro.conversion.ri2py(res[1])[0] #visible (boolean)
except (ri.RRuntimeError, ValueError) as exception:
warning_or_other_msg = self.flush() # otherwise next return seems to have copy of error
raise RInterpreterError(line, str_to_unicode(str(exception)), warning_or_other_msg)
text_output = self.flush()
ri.set_writeconsole(old_writeconsole)
return text_output, value, visible
def write_console(self, output):
'''
A hook to capture R's stdout in a cache.
'''
self.Rstdout_cache.append(output)
def flush(self):
'''
Flush R's stdout cache to a string, returning the string.
'''
value = ''.join([str_to_unicode(s, 'utf-8') for s in self.Rstdout_cache])
self.Rstdout_cache = []
return value
@skip_doctest
@needs_local_scope
@line_magic
def Rpush(self, line, local_ns=None):
'''
A line-level magic for R that pushes
variables from python to rpy2. The line should be made up
of whitespace separated variable names in the IPython
namespace::
In [7]: import numpy as np
In [8]: X = np.array([4.5,6.3,7.9])
In [9]: X.mean()
Out[9]: 6.2333333333333343
In [10]: %Rpush X
In [11]: %R mean(X)
Out[11]: array([ 6.23333333])
'''
if local_ns is None:
local_ns = {}
inputs = line.split(' ')
for input in inputs:
try:
val = local_ns[input]
except KeyError:
try:
val = self.shell.user_ns[input]
except KeyError:
# reraise the KeyError as a NameError so that it looks like
# the standard python behavior when you use an unnamed
# variable
raise NameError("name '%s' is not defined" % input)
self.r.assign(input, self.pyconverter(val))
@skip_doctest
@magic_arguments()
@argument(
'-d', '--as_dataframe', action='store_true',
default=False,
help='Convert objects to data.frames before returning to ipython.'
)
@argument(
'outputs',
nargs='*',
)
@line_magic
def Rpull(self, line):
'''
A line-level magic for R that pulls
variables from python to rpy2::
In [18]: _ = %R x = c(3,4,6.7); y = c(4,6,7); z = c('a',3,4)
In [19]: %Rpull x y z
In [20]: x
Out[20]: array([ 3. , 4. , 6.7])
In [21]: y
Out[21]: array([ 4., 6., 7.])
In [22]: z
Out[22]:
array(['a', '3', '4'],
dtype='|S1')
If --as_dataframe, then each object is returned as a structured array
after first passed through "as.data.frame" in R before
being calling self.Rconverter.
This is useful when a structured array is desired as output, or
when the object in R has mixed data types.
See the %%R docstring for more examples.
Notes
-----
Beware that R names can have '.' so this is not fool proof.
To avoid this, don't name your R objects with '.'s...
'''
args = parse_argstring(self.Rpull, line)
outputs = args.outputs
for output in outputs:
self.shell.push({output:self.Rconverter(self.r(output),dataframe=args.as_dataframe)})
@skip_doctest
@magic_arguments()
@argument(
'-d', '--as_dataframe', action='store_true',
default=False,
help='Convert objects to data.frames before returning to ipython.'
)
@argument(
'output',
nargs=1,
type=str,
)
@line_magic
def Rget(self, line):
'''
Return an object from rpy2, possibly as a structured array (if possible).
Similar to Rpull except only one argument is accepted and the value is
returned rather than pushed to self.shell.user_ns::
In [3]: dtype=[('x', '<i4'), ('y', '<f8'), ('z', '|S1')]
In [4]: datapy = np.array([(1, 2.9, 'a'), (2, 3.5, 'b'), (3, 2.1, 'c'), (4, 5, 'e')], dtype=dtype)
In [5]: %R -i datapy
In [6]: %Rget datapy
Out[6]:
array([['1', '2', '3', '4'],
['2', '3', '2', '5'],
['a', 'b', 'c', 'e']],
dtype='|S1')
In [7]: %Rget -d datapy
Out[7]:
array([(1, 2.9, 'a'), (2, 3.5, 'b'), (3, 2.1, 'c'), (4, 5.0, 'e')],
dtype=[('x', '<i4'), ('y', '<f8'), ('z', '|S1')])
'''
args = parse_argstring(self.Rget, line)
output = args.output
return self.Rconverter(self.r(output[0]),dataframe=args.as_dataframe)
@skip_doctest
@magic_arguments()
@argument(
'-i', '--input', action='append',
        help='Names of input variables from shell.user_ns to be assigned to R variables of the same names after calling self.pyconverter. Multiple names can be passed separated only by commas with no whitespace.'
)
@argument(
'-o', '--output', action='append',
help='Names of variables to be pushed from rpy2 to shell.user_ns after executing cell body and applying self.Rconverter. Multiple names can be passed separated only by commas with no whitespace.'
)
@argument(
'-w', '--width', type=int,
help='Width of png plotting device sent as an argument to *png* in R.'
)
@argument(
'-h', '--height', type=int,
help='Height of png plotting device sent as an argument to *png* in R.'
)
@argument(
'-d', '--dataframe', action='append',
help='Convert these objects to data.frames and return as structured arrays.'
)
@argument(
'-u', '--units', type=unicode_type, choices=["px", "in", "cm", "mm"],
help='Units of png plotting device sent as an argument to *png* in R. One of ["px", "in", "cm", "mm"].'
)
@argument(
'-r', '--res', type=int,
help='Resolution of png plotting device sent as an argument to *png* in R. Defaults to 72 if *units* is one of ["in", "cm", "mm"].'
)
@argument(
'-p', '--pointsize', type=int,
help='Pointsize of png plotting device sent as an argument to *png* in R.'
)
@argument(
'-b', '--bg',
help='Background of png plotting device sent as an argument to *png* in R.'
)
@argument(
'-n', '--noreturn',
help='Force the magic to not return anything.',
action='store_true',
default=False
)
@argument(
'code',
nargs='*',
)
@needs_local_scope
@line_cell_magic
def R(self, line, cell=None, local_ns=None):
'''
Execute code in R, and pull some of the results back into the Python namespace.
In line mode, this will evaluate an expression and convert the returned value to a Python object.
The return value is determined by rpy2's behaviour of returning the result of evaluating the
final line.
Multiple R lines can be executed by joining them with semicolons::
In [9]: %R X=c(1,4,5,7); sd(X); mean(X)
Out[9]: array([ 4.25])
In cell mode, this will run a block of R code. The resulting value
is printed if it would printed be when evaluating the same code
within a standard R REPL.
Nothing is returned to python by default in cell mode::
In [10]: %%R
....: Y = c(2,4,3,9)
....: summary(lm(Y~X))
Call:
lm(formula = Y ~ X)
Residuals:
1 2 3 4
0.88 -0.24 -2.28 1.64
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 0.0800 2.3000 0.035 0.975
X 1.0400 0.4822 2.157 0.164
Residual standard error: 2.088 on 2 degrees of freedom
Multiple R-squared: 0.6993,Adjusted R-squared: 0.549
F-statistic: 4.651 on 1 and 2 DF, p-value: 0.1638
In the notebook, plots are published as the output of the cell::
%R plot(X, Y)
        will create a scatter plot of X vs Y.
If cell is not None and line has some R code, it is prepended to
the R code in cell.
Objects can be passed back and forth between rpy2 and python via the -i -o flags in line::
In [14]: Z = np.array([1,4,5,10])
In [15]: %R -i Z mean(Z)
Out[15]: array([ 5.])
In [16]: %R -o W W=Z*mean(Z)
Out[16]: array([ 5., 20., 25., 50.])
In [17]: W
Out[17]: array([ 5., 20., 25., 50.])
The return value is determined by these rules:
* If the cell is not None, the magic returns None.
* If the cell evaluates as False, the resulting value is returned
unless the final line prints something to the console, in
which case None is returned.
* If the final line results in a NULL value when evaluated
by rpy2, then None is returned.
* No attempt is made to convert the final value to a structured array.
Use the --dataframe flag or %Rget to push / return a structured array.
* If the -n flag is present, there is no return value.
* A trailing ';' will also result in no return value as the last
value in the line is an empty string.
The --dataframe argument will attempt to return structured arrays.
This is useful for dataframes with
mixed data types. Note also that for a data.frame,
if it is returned as an ndarray, it is transposed::
In [18]: dtype=[('x', '<i4'), ('y', '<f8'), ('z', '|S1')]
In [19]: datapy = np.array([(1, 2.9, 'a'), (2, 3.5, 'b'), (3, 2.1, 'c'), (4, 5, 'e')], dtype=dtype)
In [20]: %%R -o datar
datar = datapy
....:
In [21]: datar
Out[21]:
array([['1', '2', '3', '4'],
['2', '3', '2', '5'],
['a', 'b', 'c', 'e']],
dtype='|S1')
In [22]: %%R -d datar
datar = datapy
....:
In [23]: datar
Out[23]:
array([(1, 2.9, 'a'), (2, 3.5, 'b'), (3, 2.1, 'c'), (4, 5.0, 'e')],
dtype=[('x', '<i4'), ('y', '<f8'), ('z', '|S1')])
The --dataframe argument first tries colnames, then names.
If both are NULL, it returns an ndarray (i.e. unstructured)::
In [1]: %R mydata=c(4,6,8.3); NULL
In [2]: %R -d mydata
In [3]: mydata
Out[3]: array([ 4. , 6. , 8.3])
In [4]: %R names(mydata) = c('a','b','c'); NULL
In [5]: %R -d mydata
In [6]: mydata
Out[6]:
array((4.0, 6.0, 8.3),
dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
In [7]: %R -o mydata
In [8]: mydata
Out[8]: array([ 4. , 6. , 8.3])
'''
args = parse_argstring(self.R, line)
# arguments 'code' in line are prepended to
# the cell lines
if cell is None:
code = ''
return_output = True
line_mode = True
else:
code = cell
return_output = False
line_mode = False
code = ' '.join(args.code) + code
# if there is no local namespace then default to an empty dict
if local_ns is None:
local_ns = {}
if args.input:
for input in ','.join(args.input).split(','):
try:
val = local_ns[input]
except KeyError:
try:
val = self.shell.user_ns[input]
except KeyError:
raise NameError("name '%s' is not defined" % input)
self.r.assign(input, self.pyconverter(val))
if getattr(args, 'units') is not None:
if args.units != "px" and getattr(args, 'res') is None:
args.res = 72
args.units = '"%s"' % args.units
png_argdict = dict([(n, getattr(args, n)) for n in ['units', 'res', 'height', 'width', 'bg', 'pointsize']])
png_args = ','.join(['%s=%s' % (o,v) for o, v in png_argdict.items() if v is not None])
# execute the R code in a temporary directory
tmpd = tempfile.mkdtemp()
self.r('png("%s/Rplots%%03d.png",%s)' % (tmpd.replace('\\', '/'), png_args))
text_output = ''
try:
if line_mode:
for line in code.split(';'):
text_result, result, visible = self.eval(line)
text_output += text_result
if text_result:
# the last line printed something to the console so we won't return it
return_output = False
else:
text_result, result, visible = self.eval(code)
text_output += text_result
if visible:
old_writeconsole = ri.get_writeconsole()
ri.set_writeconsole(self.write_console)
ro.r.show(result)
text_output += self.flush()
ri.set_writeconsole(old_writeconsole)
except RInterpreterError as e:
print(e.stdout)
if not e.stdout.endswith(e.err):
print(e.err)
rmtree(tmpd)
return
finally:
self.r('dev.off()')
# read out all the saved .png files
images = [open(imgfile, 'rb').read() for imgfile in glob("%s/Rplots*png" % tmpd)]
# now publish the images
# mimicking IPython/zmq/pylab/backend_inline.py
fmt = 'png'
mimetypes = { 'png' : 'image/png', 'svg' : 'image/svg+xml' }
mime = mimetypes[fmt]
# publish the printed R objects, if any
display_data = []
if text_output:
display_data.append(('RMagic.R', {'text/plain':text_output}))
        # flush text streams before sending figures, helps a little with output
        # synchronization in the console (though it's a bandaid, not a real sln)
        sys.stdout.flush(); sys.stderr.flush()
        for image in images:
            display_data.append(('RMagic.R', {mime: image}))
# kill the temporary directory
rmtree(tmpd)
# try to turn every output into a numpy array
        # this means that outputs are assumed to be castable
# as numpy arrays
if args.output:
for output in ','.join(args.output).split(','):
self.shell.push({output:self.Rconverter(self.r(output), dataframe=False)})
if args.dataframe:
for output in ','.join(args.dataframe).split(','):
self.shell.push({output:self.Rconverter(self.r(output), dataframe=True)})
for tag, disp_d in display_data:
publish_display_data(data=disp_d, source=tag)
# this will keep a reference to the display_data
# which might be useful to other objects who happen to use
# this method
if self.cache_display_data:
self.display_cache = display_data
# if in line mode and return_output, return the result as an ndarray
if return_output and not args.noreturn:
if result != ri.NULL:
return self.Rconverter(result, dataframe=False)
__doc__ = __doc__.format(
R_DOC = dedent(RMagics.R.__doc__),
RPUSH_DOC = dedent(RMagics.Rpush.__doc__),
RPULL_DOC = dedent(RMagics.Rpull.__doc__),
RGET_DOC = dedent(RMagics.Rget.__doc__)
)
def load_ipython_extension(ip):
"""Load the extension in IPython."""
warnings.warn("The rmagic extension in IPython is deprecated in favour of "
"rpy2.ipython. If available, that will be loaded instead.\n"
"http://rpy.sourceforge.net/")
try:
import rpy2.ipython
except ImportError:
pass # Fall back to our own implementation for now
else:
return rpy2.ipython.load_ipython_extension(ip)
ip.register_magics(RMagics)
# Initialising rpy2 interferes with readline. Since, at this point, we've
# probably just loaded rpy2, we reset the delimiters. See issue gh-2759.
if ip.has_readline:
ip.readline.set_completer_delims(ip.readline_delims)
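
# Typical interactive use once the extension is loaded (an illustrative session
# only; the magics shown are the ones registered above, nothing new is assumed):
#
#   In [1]: %load_ext rmagic
#   In [2]: import numpy as np
#   In [3]: X = np.arange(10); Y = 2 * X + 1
#   In [4]: %Rpush X Y
#   In [5]: %R res <- Y - mean(Y); print(res)
#   In [6]: %Rpull res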
| gpl-3.0 |
harisbal/pandas | pandas/tests/groupby/test_groupby.py | 3 | 54271 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime
from decimal import Decimal
from pandas import (date_range, Timestamp,
Index, MultiIndex, DataFrame, Series,
Panel, DatetimeIndex, read_csv)
from pandas.errors import PerformanceWarning
from pandas.util.testing import (assert_frame_equal,
assert_series_equal, assert_almost_equal)
from pandas.compat import (range, lrange, StringIO, lmap, lzip, map, zip,
OrderedDict)
from pandas import compat
from collections import defaultdict
import pandas.core.common as com
import numpy as np
import pandas.util.testing as tm
import pandas as pd
def test_repr():
# GH18203
result = repr(pd.Grouper(key='A', level='B'))
expected = "Grouper(key='A', level='B', axis=0, sort=False)"
assert result == expected
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'float64', 'float32'])
def test_basic(dtype):
data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
assert len(v) == 3
agged = grouped.aggregate(np.mean)
assert agged[1] == 1
assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
assert_series_equal(agged, grouped.mean())
assert_series_equal(grouped.agg(np.sum), grouped.sum())
expected = grouped.apply(lambda x: x * x.sum())
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
assert_series_equal(transformed, expected)
value_grouped = data.groupby(data)
assert_series_equal(value_grouped.aggregate(np.mean), agged,
check_index_type=False)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
agged = grouped.aggregate({'one': np.mean, 'two': np.std})
group_constants = {0: 10, 1: 20, 2: 30}
agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
assert agged[1] == 21
# corner cases
pytest.raises(Exception, grouped.aggregate, lambda x: x * 2)
def test_groupby_nonobject_dtype(mframe, df_mixed_floats):
key = mframe.index.labels[0]
grouped = mframe.groupby(key)
result = grouped.sum()
expected = mframe.groupby(key.astype('O')).sum()
assert_frame_equal(result, expected)
# GH 3911, mixed frame non-conversion
df = df_mixed_floats.copy()
df['value'] = lrange(len(df))
def max_value(group):
return group.loc[group['value'].idxmax()]
applied = df.groupby('A').apply(max_value)
result = applied.get_dtype_counts().sort_values()
expected = Series({'float64': 2,
'int64': 1,
'object': 2}).sort_values()
assert_series_equal(result, expected)
def test_groupby_return_type():
# GH2893, return a reduced type
df1 = DataFrame(
[{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 2, "val2": 27},
{"val1": 2, "val2": 12}
])
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
result = df1.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
df2 = DataFrame(
[{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 1, "val2": 27},
{"val1": 1, "val2": 12}
])
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
result = df2.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
# GH3596, return a consistent type (regression in 0.11 from 0.10.1)
df = DataFrame([[1, 1], [1, 1]], columns=['X', 'Y'])
result = df.groupby('X', squeeze=False).count()
assert isinstance(result, DataFrame)
# GH5592
    # inconsistent return type
df = DataFrame(dict(A=['Tiger', 'Tiger', 'Tiger', 'Lamb', 'Lamb',
'Pony', 'Pony'], B=Series(
np.arange(7), dtype='int64'), C=date_range(
'20130101', periods=7)))
def f(grp):
return grp.iloc[0]
expected = df.groupby('A').first()[['B']]
result = df.groupby('A').apply(f)[['B']]
assert_frame_equal(result, expected)
def f(grp):
if grp.name == 'Tiger':
return None
return grp.iloc[0]
result = df.groupby('A').apply(f)[['B']]
e = expected.copy()
e.loc['Tiger'] = np.nan
assert_frame_equal(result, e)
def f(grp):
if grp.name == 'Pony':
return None
return grp.iloc[0]
result = df.groupby('A').apply(f)[['B']]
e = expected.copy()
e.loc['Pony'] = np.nan
assert_frame_equal(result, e)
# 5592 revisited, with datetimes
def f(grp):
if grp.name == 'Pony':
return None
return grp.iloc[0]
result = df.groupby('A').apply(f)[['C']]
e = df.groupby('A').first()[['C']]
e.loc['Pony'] = pd.NaT
assert_frame_equal(result, e)
# scalar outputs
def f(grp):
if grp.name == 'Pony':
return None
return grp.iloc[0].loc['C']
result = df.groupby('A').apply(f)
e = df.groupby('A').first()['C'].copy()
e.loc['Pony'] = np.nan
e.name = None
assert_series_equal(result, e)
def test_pass_args_kwargs(ts, tsframe):
def f(x, q=None, axis=0):
return np.percentile(x, q, axis=axis)
g = lambda x: np.percentile(x, 80, axis=0)
# Series
ts_grouped = ts.groupby(lambda x: x.month)
agg_result = ts_grouped.agg(np.percentile, 80, axis=0)
apply_result = ts_grouped.apply(np.percentile, 80, axis=0)
trans_result = ts_grouped.transform(np.percentile, 80, axis=0)
agg_expected = ts_grouped.quantile(.8)
trans_expected = ts_grouped.transform(g)
assert_series_equal(apply_result, agg_expected)
assert_series_equal(agg_result, agg_expected, check_names=False)
assert_series_equal(trans_result, trans_expected)
agg_result = ts_grouped.agg(f, q=80)
apply_result = ts_grouped.apply(f, q=80)
trans_result = ts_grouped.transform(f, q=80)
assert_series_equal(agg_result, agg_expected)
assert_series_equal(apply_result, agg_expected)
assert_series_equal(trans_result, trans_expected)
# DataFrame
df_grouped = tsframe.groupby(lambda x: x.month)
agg_result = df_grouped.agg(np.percentile, 80, axis=0)
apply_result = df_grouped.apply(DataFrame.quantile, .8)
expected = df_grouped.quantile(.8)
assert_frame_equal(apply_result, expected)
assert_frame_equal(agg_result, expected, check_names=False)
agg_result = df_grouped.agg(f, q=80)
apply_result = df_grouped.apply(DataFrame.quantile, q=.8)
assert_frame_equal(agg_result, expected, check_names=False)
assert_frame_equal(apply_result, expected)
def test_len():
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day])
assert len(grouped) == len(df)
grouped = df.groupby([lambda x: x.year, lambda x: x.month])
expected = len({(x.year, x.month) for x in df.index})
assert len(grouped) == expected
# issue 11016
df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3]))
assert len(df.groupby(('a'))) == 0
assert len(df.groupby(('b'))) == 3
assert len(df.groupby(['a', 'b'])) == 3
def test_basic_regression():
# regression
T = [1.0 * x for x in lrange(1, 10) * 10][:1095]
result = Series(T, lrange(0, len(T)))
groupings = np.random.random((1100, ))
groupings = Series(groupings, lrange(0, len(groupings))) * 10.
grouped = result.groupby(groupings)
grouped.mean()
@pytest.mark.parametrize('dtype', ['float64', 'float32', 'int64',
'int32', 'int16', 'int8'])
def test_with_na_groups(dtype):
index = Index(np.arange(10))
values = Series(np.ones(10), index, dtype=dtype)
labels = Series([np.nan, 'foo', 'bar', 'bar', np.nan, np.nan,
'bar', 'bar', np.nan, 'foo'], index=index)
# this SHOULD be an int
grouped = values.groupby(labels)
agged = grouped.agg(len)
expected = Series([4, 2], index=['bar', 'foo'])
assert_series_equal(agged, expected, check_dtype=False)
# assert issubclass(agged.dtype.type, np.integer)
# explicitly return a float from my function
def f(x):
return float(len(x))
agged = grouped.agg(f)
expected = Series([4, 2], index=['bar', 'foo'])
assert_series_equal(agged, expected, check_dtype=False)
assert issubclass(agged.dtype.type, np.dtype(dtype).type)
def test_indices_concatenation_order():
# GH 2808
def f1(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(levels=[[]] * 2, labels=[[]] * 2,
names=['b', 'c'])
res = DataFrame(None, columns=['a'], index=multiindex)
return res
else:
y = y.set_index(['b', 'c'])
return y
def f2(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
return DataFrame()
else:
y = y.set_index(['b', 'c'])
return y
def f3(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(levels=[[]] * 2, labels=[[]] * 2,
names=['foo', 'bar'])
res = DataFrame(None, columns=['a', 'b'], index=multiindex)
return res
else:
return y
df = DataFrame({'a': [1, 2, 2, 2], 'b': lrange(4), 'c': lrange(5, 9)})
df2 = DataFrame({'a': [3, 2, 2, 2], 'b': lrange(4), 'c': lrange(5, 9)})
# correct result
result1 = df.groupby('a').apply(f1)
result2 = df2.groupby('a').apply(f1)
assert_frame_equal(result1, result2)
# should fail (not the same number of levels)
pytest.raises(AssertionError, df.groupby('a').apply, f2)
pytest.raises(AssertionError, df2.groupby('a').apply, f2)
# should fail (incorrect shape)
pytest.raises(AssertionError, df.groupby('a').apply, f3)
pytest.raises(AssertionError, df2.groupby('a').apply, f3)
def test_attr_wrapper(ts):
grouped = ts.groupby(lambda x: x.weekday())
result = grouped.std()
expected = grouped.agg(lambda x: np.std(x, ddof=1))
assert_series_equal(result, expected)
# this is pretty cool
result = grouped.describe()
expected = {}
for name, gp in grouped:
expected[name] = gp.describe()
expected = DataFrame(expected).T
assert_frame_equal(result, expected)
# get attribute
result = grouped.dtype
expected = grouped.agg(lambda x: x.dtype)
# make sure raises error
pytest.raises(AttributeError, getattr, grouped, 'foo')
def test_frame_groupby(tsframe):
grouped = tsframe.groupby(lambda x: x.weekday())
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == 5
assert len(aggregated.columns) == 4
# by string
tscopy = tsframe.copy()
tscopy['weekday'] = [x.weekday() for x in tscopy.index]
stragged = tscopy.groupby('weekday').aggregate(np.mean)
assert_frame_equal(stragged, aggregated, check_names=False)
# transform
grouped = tsframe.head(30).groupby(lambda x: x.weekday())
transformed = grouped.transform(lambda x: x - x.mean())
assert len(transformed) == 30
assert len(transformed.columns) == 4
# transform propagate
transformed = grouped.transform(lambda x: x.mean())
for name, group in grouped:
mean = group.mean()
for idx in group.index:
tm.assert_series_equal(transformed.xs(idx), mean,
check_names=False)
# iterate
for weekday, group in grouped:
assert group.index[0].weekday() == weekday
# groups / group_indices
groups = grouped.groups
indices = grouped.indices
for k, v in compat.iteritems(groups):
samething = tsframe.index.take(indices[k])
assert (samething == v).all()
def test_frame_groupby_columns(tsframe):
mapping = {'A': 0, 'B': 0, 'C': 1, 'D': 1}
grouped = tsframe.groupby(mapping, axis=1)
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == len(tsframe)
assert len(aggregated.columns) == 2
# transform
tf = lambda x: x - x.mean()
groupedT = tsframe.T.groupby(mapping, axis=0)
assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
# iterate
for k, v in grouped:
assert len(v.columns) == 2
def test_frame_set_name_single(df):
grouped = df.groupby('A')
result = grouped.mean()
assert result.index.name == 'A'
result = df.groupby('A', as_index=False).mean()
assert result.index.name != 'A'
result = grouped.agg(np.mean)
assert result.index.name == 'A'
result = grouped.agg({'C': np.mean, 'D': np.std})
assert result.index.name == 'A'
result = grouped['C'].mean()
assert result.index.name == 'A'
result = grouped['C'].agg(np.mean)
assert result.index.name == 'A'
result = grouped['C'].agg([np.mean, np.std])
assert result.index.name == 'A'
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = grouped['C'].agg({'foo': np.mean, 'bar': np.std})
assert result.index.name == 'A'
def test_multi_func(df):
col1 = df['A']
col2 = df['B']
grouped = df.groupby([col1.get, col2.get])
agged = grouped.mean()
expected = df.groupby(['A', 'B']).mean()
# TODO groupby get drops names
assert_frame_equal(agged.loc[:, ['C', 'D']],
expected.loc[:, ['C', 'D']],
check_names=False)
# some "groups" with no data
df = DataFrame({'v1': np.random.randn(6),
'v2': np.random.randn(6),
'k1': np.array(['b', 'b', 'b', 'a', 'a', 'a']),
'k2': np.array(['1', '1', '1', '2', '2', '2'])},
index=['one', 'two', 'three', 'four', 'five', 'six'])
# only verify that it works for now
grouped = df.groupby(['k1', 'k2'])
grouped.agg(np.sum)
def test_multi_key_multiple_functions(df):
grouped = df.groupby(['A', 'B'])['C']
agged = grouped.agg([np.mean, np.std])
expected = DataFrame({'mean': grouped.agg(np.mean),
'std': grouped.agg(np.std)})
assert_frame_equal(agged, expected)
def test_frame_multi_key_function_list():
data = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
grouped = data.groupby(['A', 'B'])
funcs = [np.mean, np.std]
agged = grouped.agg(funcs)
expected = pd.concat([grouped['D'].agg(funcs), grouped['E'].agg(funcs),
grouped['F'].agg(funcs)],
keys=['D', 'E', 'F'], axis=1)
assert (isinstance(agged.index, MultiIndex))
assert (isinstance(expected.index, MultiIndex))
assert_frame_equal(agged, expected)
@pytest.mark.parametrize('op', [lambda x: x.sum(), lambda x: x.mean()])
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
def test_groupby_multiple_columns(df, op):
data = df
grouped = data.groupby(['A', 'B'])
result1 = op(grouped)
expected = defaultdict(dict)
for n1, gp1 in data.groupby('A'):
for n2, gp2 in gp1.groupby('B'):
expected[n1][n2] = op(gp2.loc[:, ['C', 'D']])
expected = {k: DataFrame(v)
for k, v in compat.iteritems(expected)}
expected = Panel.fromDict(expected).swapaxes(0, 1)
expected.major_axis.name, expected.minor_axis.name = 'A', 'B'
# a little bit crude
for col in ['C', 'D']:
result_col = op(grouped[col])
exp = expected[col]
pivoted = result1[col].unstack()
pivoted2 = result_col.unstack()
assert_frame_equal(pivoted.reindex_like(exp), exp)
assert_frame_equal(pivoted2.reindex_like(exp), exp)
# test single series works the same
result = data['C'].groupby([data['A'], data['B']]).mean()
expected = data.groupby(['A', 'B']).mean()['C']
assert_series_equal(result, expected)
def test_groupby_as_index_agg(df):
grouped = df.groupby('A', as_index=False)
# single-key
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
result2 = grouped.agg(OrderedDict([['C', np.mean], ['D', np.sum]]))
expected2 = grouped.mean()
expected2['D'] = grouped.sum()['D']
assert_frame_equal(result2, expected2)
grouped = df.groupby('A', as_index=True)
expected3 = grouped['C'].sum()
expected3 = DataFrame(expected3).rename(columns={'C': 'Q'})
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result3 = grouped['C'].agg({'Q': np.sum})
assert_frame_equal(result3, expected3)
# multi-key
grouped = df.groupby(['A', 'B'], as_index=False)
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
result2 = grouped.agg(OrderedDict([['C', np.mean], ['D', np.sum]]))
expected2 = grouped.mean()
expected2['D'] = grouped.sum()['D']
assert_frame_equal(result2, expected2)
expected3 = grouped['C'].sum()
expected3 = DataFrame(expected3).rename(columns={'C': 'Q'})
result3 = grouped['C'].agg({'Q': np.sum})
assert_frame_equal(result3, expected3)
# GH7115 & GH8112 & GH8582
df = DataFrame(np.random.randint(0, 100, (50, 3)),
columns=['jim', 'joe', 'jolie'])
ts = Series(np.random.randint(5, 10, 50), name='jim')
gr = df.groupby(ts)
gr.nth(0) # invokes set_selection_from_grouper internally
assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))
for attr in ['mean', 'max', 'count', 'idxmax', 'cumsum', 'all']:
gr = df.groupby(ts, as_index=False)
left = getattr(gr, attr)()
gr = df.groupby(ts.values, as_index=True)
right = getattr(gr, attr)().reset_index(drop=True)
assert_frame_equal(left, right)
def test_as_index_series_return_frame(df):
grouped = df.groupby('A', as_index=False)
grouped2 = df.groupby(['A', 'B'], as_index=False)
result = grouped['C'].agg(np.sum)
expected = grouped.agg(np.sum).loc[:, ['A', 'C']]
assert isinstance(result, DataFrame)
assert_frame_equal(result, expected)
result2 = grouped2['C'].agg(np.sum)
expected2 = grouped2.agg(np.sum).loc[:, ['A', 'B', 'C']]
assert isinstance(result2, DataFrame)
assert_frame_equal(result2, expected2)
result = grouped['C'].sum()
expected = grouped.sum().loc[:, ['A', 'C']]
assert isinstance(result, DataFrame)
assert_frame_equal(result, expected)
result2 = grouped2['C'].sum()
expected2 = grouped2.sum().loc[:, ['A', 'B', 'C']]
assert isinstance(result2, DataFrame)
assert_frame_equal(result2, expected2)
def test_as_index_series_column_slice_raises(df):
# GH15072
grouped = df.groupby('A', as_index=False)
msg = r"Column\(s\) C already selected"
with tm.assert_raises_regex(IndexError, msg):
grouped['C'].__getitem__('D')
def test_groupby_as_index_cython(df):
data = df
# single-key
grouped = data.groupby('A', as_index=False)
result = grouped.mean()
expected = data.groupby(['A']).mean()
expected.insert(0, 'A', expected.index)
expected.index = np.arange(len(expected))
assert_frame_equal(result, expected)
# multi-key
grouped = data.groupby(['A', 'B'], as_index=False)
result = grouped.mean()
expected = data.groupby(['A', 'B']).mean()
arrays = lzip(*expected.index.values)
expected.insert(0, 'A', arrays[0])
expected.insert(1, 'B', arrays[1])
expected.index = np.arange(len(expected))
assert_frame_equal(result, expected)
def test_groupby_as_index_series_scalar(df):
grouped = df.groupby(['A', 'B'], as_index=False)
# GH #421
result = grouped['C'].agg(len)
expected = grouped.agg(len).loc[:, ['A', 'B', 'C']]
assert_frame_equal(result, expected)
def test_groupby_as_index_corner(df, ts):
pytest.raises(TypeError, ts.groupby, lambda x: x.weekday(),
as_index=False)
pytest.raises(ValueError, df.groupby, lambda x: x.lower(),
as_index=False, axis=1)
def test_groupby_multiple_key(df):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day])
agged = grouped.sum()
assert_almost_equal(df.values, agged.values)
grouped = df.T.groupby([lambda x: x.year,
lambda x: x.month,
lambda x: x.day], axis=1)
agged = grouped.agg(lambda x: x.sum())
tm.assert_index_equal(agged.index, df.columns)
assert_almost_equal(df.T.values, agged.values)
agged = grouped.agg(lambda x: x.sum())
assert_almost_equal(df.T.values, agged.values)
def test_groupby_multi_corner(df):
# test that having an all-NA column doesn't mess you up
df = df.copy()
df['bad'] = np.nan
agged = df.groupby(['A', 'B']).mean()
expected = df.groupby(['A', 'B']).mean()
expected['bad'] = np.nan
assert_frame_equal(agged, expected)
def test_omit_nuisance(df):
grouped = df.groupby('A')
result = grouped.mean()
expected = df.loc[:, ['A', 'C', 'D']].groupby('A').mean()
assert_frame_equal(result, expected)
agged = grouped.agg(np.mean)
exp = grouped.mean()
assert_frame_equal(agged, exp)
df = df.loc[:, ['A', 'C', 'D']]
df['E'] = datetime.now()
grouped = df.groupby('A')
result = grouped.agg(np.sum)
expected = grouped.sum()
assert_frame_equal(result, expected)
# won't work with axis = 1
grouped = df.groupby({'A': 0, 'C': 0, 'D': 1, 'E': 1}, axis=1)
result = pytest.raises(TypeError, grouped.agg,
lambda x: x.sum(0, numeric_only=False))
def test_omit_nuisance_python_multiple(three_group):
grouped = three_group.groupby(['A', 'B'])
agged = grouped.agg(np.mean)
exp = grouped.mean()
assert_frame_equal(agged, exp)
def test_empty_groups_corner(mframe):
# handle empty groups
df = DataFrame({'k1': np.array(['b', 'b', 'b', 'a', 'a', 'a']),
'k2': np.array(['1', '1', '1', '2', '2', '2']),
'k3': ['foo', 'bar'] * 3,
'v1': np.random.randn(6),
'v2': np.random.randn(6)})
grouped = df.groupby(['k1', 'k2'])
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
grouped = mframe[3:5].groupby(level=0)
agged = grouped.apply(lambda x: x.mean())
agged_A = grouped['A'].apply(np.mean)
assert_series_equal(agged['A'], agged_A)
assert agged.index.name == 'first'
def test_nonsense_func():
df = DataFrame([0])
pytest.raises(Exception, df.groupby, lambda x: x + 'foo')
def test_wrap_aggregated_output_multindex(mframe):
df = mframe.T
df['baz', 'two'] = 'peekaboo'
keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]
agged = df.groupby(keys).agg(np.mean)
assert isinstance(agged.columns, MultiIndex)
def aggfun(ser):
if ser.name == ('foo', 'one'):
raise TypeError
else:
return ser.sum()
agged2 = df.groupby(keys).aggregate(aggfun)
assert len(agged2.columns) + 1 == len(df.columns)
def test_groupby_level_apply(mframe):
result = mframe.groupby(level=0).count()
assert result.index.name == 'first'
result = mframe.groupby(level=1).count()
assert result.index.name == 'second'
result = mframe['A'].groupby(level=0).count()
assert result.index.name == 'first'
def test_groupby_level_mapper(mframe):
deleveled = mframe.reset_index()
mapper0 = {'foo': 0, 'bar': 0, 'baz': 1, 'qux': 1}
mapper1 = {'one': 0, 'two': 0, 'three': 1}
result0 = mframe.groupby(mapper0, level=0).sum()
result1 = mframe.groupby(mapper1, level=1).sum()
mapped_level0 = np.array([mapper0.get(x) for x in deleveled['first']])
mapped_level1 = np.array([mapper1.get(x) for x in deleveled['second']])
expected0 = mframe.groupby(mapped_level0).sum()
expected1 = mframe.groupby(mapped_level1).sum()
expected0.index.name, expected1.index.name = 'first', 'second'
assert_frame_equal(result0, expected0)
assert_frame_equal(result1, expected1)
def test_groupby_level_nonmulti():
# GH 1313, GH 13901
s = Series([1, 2, 3, 10, 4, 5, 20, 6],
Index([1, 2, 3, 1, 4, 5, 2, 6], name='foo'))
expected = Series([11, 22, 3, 4, 5, 6],
Index(range(1, 7), name='foo'))
result = s.groupby(level=0).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=[0]).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=-1).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=[-1]).sum()
tm.assert_series_equal(result, expected)
pytest.raises(ValueError, s.groupby, level=1)
pytest.raises(ValueError, s.groupby, level=-2)
pytest.raises(ValueError, s.groupby, level=[])
pytest.raises(ValueError, s.groupby, level=[0, 0])
pytest.raises(ValueError, s.groupby, level=[0, 1])
pytest.raises(ValueError, s.groupby, level=[1])
def test_groupby_complex():
# GH 12902
a = Series(data=np.arange(4) * (1 + 2j), index=[0, 0, 1, 1])
expected = Series((1 + 2j, 5 + 10j))
result = a.groupby(level=0).sum()
assert_series_equal(result, expected)
result = a.sum(level=0)
assert_series_equal(result, expected)
def test_mutate_groups():
# GH3380
df = DataFrame({
'cat1': ['a'] * 8 + ['b'] * 6,
'cat2': ['c'] * 2 + ['d'] * 2 + ['e'] * 2 + ['f'] * 2 + ['c'] * 2 +
['d'] * 2 + ['e'] * 2,
'cat3': lmap(lambda x: 'g%s' % x, lrange(1, 15)),
'val': np.random.randint(100, size=14),
})
def f_copy(x):
x = x.copy()
x['rank'] = x.val.rank(method='min')
return x.groupby('cat2')['rank'].min()
def f_no_copy(x):
x['rank'] = x.val.rank(method='min')
return x.groupby('cat2')['rank'].min()
grpby_copy = df.groupby('cat1').apply(f_copy)
grpby_no_copy = df.groupby('cat1').apply(f_no_copy)
assert_series_equal(grpby_copy, grpby_no_copy)
def test_no_mutate_but_looks_like():
# GH 8467
    # the first shows the mutation indicator
    # the second does not, but they should yield the same results
df = DataFrame({'key': [1, 1, 1, 2, 2, 2, 3, 3, 3], 'value': range(9)})
result1 = df.groupby('key', group_keys=True).apply(lambda x: x[:].key)
result2 = df.groupby('key', group_keys=True).apply(lambda x: x.key)
assert_series_equal(result1, result2)
def test_groupby_series_indexed_differently():
s1 = Series([5.0, -9.0, 4.0, 100., -5., 55., 6.7],
index=Index(['a', 'b', 'c', 'd', 'e', 'f', 'g']))
s2 = Series([1.0, 1.0, 4.0, 5.0, 5.0, 7.0],
index=Index(['a', 'b', 'd', 'f', 'g', 'h']))
grouped = s1.groupby(s2)
agged = grouped.mean()
exp = s1.groupby(s2.reindex(s1.index).get).mean()
assert_series_equal(agged, exp)
def test_groupby_with_hier_columns():
tuples = list(zip(*[['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux',
'qux'], ['one', 'two', 'one', 'two', 'one', 'two',
'one', 'two']]))
index = MultiIndex.from_tuples(tuples)
columns = MultiIndex.from_tuples([('A', 'cat'), ('B', 'dog'), (
'B', 'cat'), ('A', 'dog')])
df = DataFrame(np.random.randn(8, 4), index=index, columns=columns)
result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0, axis=1).mean()
tm.assert_index_equal(result.index, df.index)
result = df.groupby(level=0).agg(np.mean)
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0).apply(lambda x: x.mean())
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))
tm.assert_index_equal(result.columns, Index(['A', 'B']))
tm.assert_index_equal(result.index, df.index)
# add a nuisance column
sorted_columns, _ = columns.sortlevel(0)
df['A', 'foo'] = 'bar'
result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, df.columns[:-1])
def test_grouping_ndarray(df):
grouped = df.groupby(df['A'].values)
result = grouped.sum()
expected = df.groupby('A').sum()
assert_frame_equal(result, expected, check_names=False
) # Note: no names when grouping by value
def test_groupby_wrong_multi_labels():
data = """index,foo,bar,baz,spam,data
0,foo1,bar1,baz1,spam2,20
1,foo1,bar2,baz1,spam3,30
2,foo2,bar2,baz1,spam2,40
3,foo1,bar1,baz2,spam1,50
4,foo3,bar1,baz2,spam1,60"""
data = read_csv(StringIO(data), index_col=0)
grouped = data.groupby(['foo', 'bar', 'baz', 'spam'])
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
def test_groupby_series_with_name(df):
result = df.groupby(df['A']).mean()
result2 = df.groupby(df['A'], as_index=False).mean()
assert result.index.name == 'A'
assert 'A' in result2
result = df.groupby([df['A'], df['B']]).mean()
result2 = df.groupby([df['A'], df['B']],
as_index=False).mean()
assert result.index.names == ('A', 'B')
assert 'A' in result2
assert 'B' in result2
def test_seriesgroupby_name_attr(df):
# GH 6265
result = df.groupby('A')['C']
assert result.count().name == 'C'
assert result.mean().name == 'C'
testFunc = lambda x: np.sum(x) * 2
assert result.agg(testFunc).name == 'C'
def test_consistency_name():
# GH 12363
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
expected = df.groupby(['A']).B.count()
result = df.B.groupby(df.A).count()
assert_series_equal(result, expected)
def test_groupby_name_propagation(df):
# GH 6124
def summarize(df, name=None):
return Series({'count': 1, 'mean': 2, 'omissions': 3, }, name=name)
def summarize_random_name(df):
# Provide a different name for each Series. In this case, groupby
# should not attempt to propagate the Series name since they are
# inconsistent.
return Series({
'count': 1,
'mean': 2,
'omissions': 3,
}, name=df.iloc[0]['A'])
metrics = df.groupby('A').apply(summarize)
assert metrics.columns.name is None
metrics = df.groupby('A').apply(summarize, 'metrics')
assert metrics.columns.name == 'metrics'
metrics = df.groupby('A').apply(summarize_random_name)
assert metrics.columns.name is None
def test_groupby_nonstring_columns():
df = DataFrame([np.arange(10) for x in range(10)])
grouped = df.groupby(0)
result = grouped.mean()
expected = df.groupby(df[0]).mean()
assert_frame_equal(result, expected)
def test_groupby_mixed_type_columns():
# GH 13432, unorderable types in py3
df = DataFrame([[0, 1, 2]], columns=['A', 'B', 0])
expected = DataFrame([[1, 2]], columns=['B', 0],
index=Index([0], name='A'))
result = df.groupby('A').first()
tm.assert_frame_equal(result, expected)
result = df.groupby('A').sum()
tm.assert_frame_equal(result, expected)
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:Mean of:RuntimeWarning")
def test_cython_grouper_series_bug_noncontig():
arr = np.empty((100, 100))
arr.fill(np.nan)
obj = Series(arr[:, 0], index=lrange(100))
inds = np.tile(lrange(10), 10)
result = obj.groupby(inds).agg(Series.median)
assert result.isna().all()
def test_series_grouper_noncontig_index():
index = Index(tm.rands_array(10, 100))
values = Series(np.random.randn(50), index=index[::2])
labels = np.random.randint(0, 5, 50)
# it works!
grouped = values.groupby(labels)
# accessing the index elements causes segfault
f = lambda x: len(set(map(id, x.index)))
grouped.agg(f)
def test_convert_objects_leave_decimal_alone():
s = Series(lrange(5))
labels = np.array(['a', 'b', 'c', 'd', 'e'], dtype='O')
def convert_fast(x):
return Decimal(str(x.mean()))
def convert_force_pure(x):
# base will be length 0
assert (len(x.values.base) > 0)
return Decimal(str(x.mean()))
grouped = s.groupby(labels)
result = grouped.agg(convert_fast)
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
result = grouped.agg(convert_force_pure)
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
def test_groupby_dtype_inference_empty():
# GH 6733
df = DataFrame({'x': [], 'range': np.arange(0, dtype='int64')})
assert df['x'].dtype == np.float64
result = df.groupby('x').first()
exp_index = Index([], name='x', dtype=np.float64)
expected = DataFrame({'range': Series(
[], index=exp_index, dtype='int64')})
assert_frame_equal(result, expected, by_blocks=True)
def test_groupby_list_infer_array_like(df):
result = df.groupby(list(df['A'])).mean()
expected = df.groupby(df['A']).mean()
assert_frame_equal(result, expected, check_names=False)
pytest.raises(Exception, df.groupby, list(df['A'][:-1]))
# pathological case of ambiguity
df = DataFrame({'foo': [0, 1],
'bar': [3, 4],
'val': np.random.randn(2)})
result = df.groupby(['foo', 'bar']).mean()
expected = df.groupby([df['foo'], df['bar']]).mean()[['val']]
def test_groupby_keys_same_size_as_index():
# GH 11185
freq = 's'
index = pd.date_range(start=pd.Timestamp('2015-09-29T11:34:44-0700'),
periods=2, freq=freq)
df = pd.DataFrame([['A', 10], ['B', 15]], columns=[
'metric', 'values'
], index=index)
result = df.groupby([pd.Grouper(level=0, freq=freq), 'metric']).mean()
expected = df.set_index([df.index, 'metric'])
assert_frame_equal(result, expected)
def test_groupby_one_row():
# GH 11741
df1 = pd.DataFrame(np.random.randn(1, 4), columns=list('ABCD'))
pytest.raises(KeyError, df1.groupby, 'Z')
df2 = pd.DataFrame(np.random.randn(2, 4), columns=list('ABCD'))
pytest.raises(KeyError, df2.groupby, 'Z')
def test_groupby_nat_exclude():
# GH 6992
df = pd.DataFrame(
{'values': np.random.randn(8),
'dt': [np.nan, pd.Timestamp('2013-01-01'), np.nan, pd.Timestamp(
'2013-02-01'), np.nan, pd.Timestamp('2013-02-01'), np.nan,
pd.Timestamp('2013-01-01')],
'str': [np.nan, 'a', np.nan, 'a', np.nan, 'a', np.nan, 'b']})
grouped = df.groupby('dt')
expected = [pd.Index([1, 7]), pd.Index([3, 5])]
keys = sorted(grouped.groups.keys())
assert len(keys) == 2
for k, e in zip(keys, expected):
# grouped.groups keys are np.datetime64 with system tz
# not to be affected by tz, only compare values
tm.assert_index_equal(grouped.groups[k], e)
# confirm obj is not filtered
tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df)
assert grouped.ngroups == 2
expected = {
Timestamp('2013-01-01 00:00:00'): np.array([1, 7], dtype=np.int64),
Timestamp('2013-02-01 00:00:00'): np.array([3, 5], dtype=np.int64)
}
for k in grouped.indices:
tm.assert_numpy_array_equal(grouped.indices[k], expected[k])
tm.assert_frame_equal(
grouped.get_group(Timestamp('2013-01-01')), df.iloc[[1, 7]])
tm.assert_frame_equal(
grouped.get_group(Timestamp('2013-02-01')), df.iloc[[3, 5]])
pytest.raises(KeyError, grouped.get_group, pd.NaT)
nan_df = DataFrame({'nan': [np.nan, np.nan, np.nan],
'nat': [pd.NaT, pd.NaT, pd.NaT]})
assert nan_df['nan'].dtype == 'float64'
assert nan_df['nat'].dtype == 'datetime64[ns]'
for key in ['nan', 'nat']:
grouped = nan_df.groupby(key)
assert grouped.groups == {}
assert grouped.ngroups == 0
assert grouped.indices == {}
pytest.raises(KeyError, grouped.get_group, np.nan)
pytest.raises(KeyError, grouped.get_group, pd.NaT)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
def test_sparse_friendly(df):
sdf = df[['C', 'D']].to_sparse()
panel = tm.makePanel()
tm.add_nans(panel)
def _check_work(gp):
gp.mean()
gp.agg(np.mean)
dict(iter(gp))
# it works!
_check_work(sdf.groupby(lambda x: x // 2))
_check_work(sdf['C'].groupby(lambda x: x // 2))
_check_work(sdf.groupby(df['A']))
# do this someday
# _check_work(panel.groupby(lambda x: x.month, axis=1))
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
def test_panel_groupby():
panel = tm.makePanel()
tm.add_nans(panel)
grouped = panel.groupby({'ItemA': 0, 'ItemB': 0, 'ItemC': 1},
axis='items')
agged = grouped.mean()
agged2 = grouped.agg(lambda x: x.mean('items'))
tm.assert_panel_equal(agged, agged2)
tm.assert_index_equal(agged.items, Index([0, 1]))
grouped = panel.groupby(lambda x: x.month, axis='major')
agged = grouped.mean()
exp = Index(sorted(list(set(panel.major_axis.month))))
tm.assert_index_equal(agged.major_axis, exp)
grouped = panel.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},
axis='minor')
agged = grouped.mean()
tm.assert_index_equal(agged.minor_axis, Index([0, 1]))
def test_groupby_2d_malformed():
d = DataFrame(index=lrange(2))
d['group'] = ['g1', 'g2']
d['zeros'] = [0, 0]
d['ones'] = [1, 1]
d['label'] = ['l1', 'l2']
tmp = d.groupby(['group']).mean()
res_values = np.array([[0, 1], [0, 1]], dtype=np.int64)
tm.assert_index_equal(tmp.columns, Index(['zeros', 'ones']))
tm.assert_numpy_array_equal(tmp.values, res_values)
def test_int32_overflow():
B = np.concatenate((np.arange(10000), np.arange(10000), np.arange(5000)
))
A = np.arange(25000)
df = DataFrame({'A': A,
'B': B,
'C': A,
'D': B,
'E': np.random.randn(25000)})
left = df.groupby(['A', 'B', 'C', 'D']).sum()
right = df.groupby(['D', 'C', 'B', 'A']).sum()
assert len(left) == len(right)
def test_groupby_sort_multi():
df = DataFrame({'a': ['foo', 'bar', 'baz'],
'b': [3, 2, 1],
'c': [0, 1, 2],
'd': np.random.randn(3)})
tups = lmap(tuple, df[['a', 'b', 'c']].values)
tups = com.asarray_tuplesafe(tups)
result = df.groupby(['a', 'b', 'c'], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]])
tups = lmap(tuple, df[['c', 'a', 'b']].values)
tups = com.asarray_tuplesafe(tups)
result = df.groupby(['c', 'a', 'b'], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups)
tups = lmap(tuple, df[['b', 'c', 'a']].values)
tups = com.asarray_tuplesafe(tups)
result = df.groupby(['b', 'c', 'a'], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]])
df = DataFrame({'a': [0, 1, 2, 0, 1, 2],
'b': [0, 0, 0, 1, 1, 1],
'd': np.random.randn(6)})
grouped = df.groupby(['a', 'b'])['d']
result = grouped.sum()
def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
tups = lmap(tuple, df[keys].values)
tups = com.asarray_tuplesafe(tups)
expected = f(df.groupby(tups)[field])
for k, v in compat.iteritems(expected):
assert (result[k] == v)
_check_groupby(df, result, ['a', 'b'], 'd')
def test_dont_clobber_name_column():
df = DataFrame({'key': ['a', 'a', 'a', 'b', 'b', 'b'],
'name': ['foo', 'bar', 'baz'] * 2})
result = df.groupby('key').apply(lambda x: x)
assert_frame_equal(result, df)
def test_skip_group_keys():
tsf = tm.makeTimeDataFrame()
grouped = tsf.groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.sort_values(by='A')[:3])
pieces = []
for key, group in grouped:
pieces.append(group.sort_values(by='A')[:3])
expected = pd.concat(pieces)
assert_frame_equal(result, expected)
grouped = tsf['A'].groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.sort_values()[:3])
pieces = []
for key, group in grouped:
pieces.append(group.sort_values()[:3])
expected = pd.concat(pieces)
assert_series_equal(result, expected)
def test_no_nonsense_name(frame):
# GH #995
s = frame['C'].copy()
s.name = None
result = s.groupby(frame['A']).agg(np.sum)
assert result.name is None
def test_multifunc_sum_bug():
# GH #1065
x = DataFrame(np.arange(9).reshape(3, 3))
x['test'] = 0
x['fl'] = [1.3, 1.5, 1.6]
grouped = x.groupby('test')
result = grouped.agg({'fl': 'sum', 2: 'size'})
assert result['fl'].dtype == np.float64
def test_handle_dict_return_value(df):
def f(group):
return {'max': group.max(), 'min': group.min()}
def g(group):
return Series({'max': group.max(), 'min': group.min()})
result = df.groupby('A')['C'].apply(f)
expected = df.groupby('A')['C'].apply(g)
assert isinstance(result, Series)
assert_series_equal(result, expected)
@pytest.mark.parametrize('grouper', ['A', ['A', 'B']])
def test_set_group_name(df, grouper):
def f(group):
assert group.name is not None
return group
def freduce(group):
assert group.name is not None
return group.sum()
def foo(x):
return freduce(x)
grouped = df.groupby(grouper)
# make sure all these work
grouped.apply(f)
grouped.aggregate(freduce)
grouped.aggregate({'C': freduce, 'D': freduce})
grouped.transform(f)
grouped['C'].apply(f)
grouped['C'].aggregate(freduce)
grouped['C'].aggregate([freduce, foo])
grouped['C'].transform(f)
def test_group_name_available_in_inference_pass():
# gh-15062
df = pd.DataFrame({'a': [0, 0, 1, 1, 2, 2], 'b': np.arange(6)})
names = []
def f(group):
names.append(group.name)
return group.copy()
df.groupby('a', sort=False, group_keys=False).apply(f)
# we expect 2 zeros because we call ``f`` once to see if a faster route
# can be used.
expected_names = [0, 0, 1, 2]
assert names == expected_names
def test_no_dummy_key_names(df):
# see gh-1291
result = df.groupby(df['A'].values).sum()
assert result.index.name is None
result = df.groupby([df['A'].values, df['B'].values]).sum()
assert result.index.names == (None, None)
def test_groupby_sort_multiindex_series():
# series multiindex groupby sort argument was not being passed through
# _compress_group_index
# GH 9444
index = MultiIndex(levels=[[1, 2], [1, 2]],
labels=[[0, 0, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0]],
names=['a', 'b'])
mseries = Series([0, 1, 2, 3, 4, 5], index=index)
index = MultiIndex(levels=[[1, 2], [1, 2]],
labels=[[0, 0, 1], [1, 0, 0]], names=['a', 'b'])
mseries_result = Series([0, 2, 4], index=index)
result = mseries.groupby(level=['a', 'b'], sort=False).first()
assert_series_equal(result, mseries_result)
result = mseries.groupby(level=['a', 'b'], sort=True).first()
assert_series_equal(result, mseries_result.sort_index())
def test_groupby_reindex_inside_function():
periods = 1000
ind = DatetimeIndex(start='2012/1/1', freq='5min', periods=periods)
df = DataFrame({'high': np.arange(
periods), 'low': np.arange(periods)}, index=ind)
def agg_before(hour, func, fix=False):
"""
Run an aggregate func on the subset of data.
"""
def _func(data):
d = data.loc[data.index.map(
lambda x: x.hour < 11)].dropna()
if fix:
data[data.index[0]]
if len(d) == 0:
return None
return func(d)
return _func
def afunc(data):
d = data.select(lambda x: x.hour < 11).dropna()
return np.max(d)
grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))
closure_bad = grouped.agg({'high': agg_before(11, np.max)})
closure_good = grouped.agg({'high': agg_before(11, np.max, True)})
assert_frame_equal(closure_bad, closure_good)
def test_groupby_multiindex_missing_pair():
# GH9049
df = DataFrame({'group1': ['a', 'a', 'a', 'b'],
'group2': ['c', 'c', 'd', 'c'],
'value': [1, 1, 1, 5]})
df = df.set_index(['group1', 'group2'])
df_grouped = df.groupby(level=['group1', 'group2'], sort=True)
res = df_grouped.agg('sum')
idx = MultiIndex.from_tuples(
[('a', 'c'), ('a', 'd'), ('b', 'c')], names=['group1', 'group2'])
exp = DataFrame([[2], [1], [5]], index=idx, columns=['value'])
tm.assert_frame_equal(res, exp)
def test_groupby_multiindex_not_lexsorted():
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
assert lexsorted_df.columns.is_lexsorted()
# define the non-lexsorted version
not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3],
[1, 'b2', 'c2', 4]])
not_lexsorted_df = not_lexsorted_df.pivot_table(
index='a', columns=['b', 'c'], values='d')
not_lexsorted_df = not_lexsorted_df.reset_index()
assert not not_lexsorted_df.columns.is_lexsorted()
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.groupby('a').mean()
with tm.assert_produces_warning(PerformanceWarning):
result = not_lexsorted_df.groupby('a').mean()
tm.assert_frame_equal(expected, result)
# a transforming function should work regardless of sort
# GH 14776
df = DataFrame({'x': ['a', 'a', 'b', 'a'],
'y': [1, 1, 2, 2],
'z': [1, 2, 3, 4]}).set_index(['x', 'y'])
assert not df.index.is_lexsorted()
for level in [0, 1, [0, 1]]:
for sort in [False, True]:
result = df.groupby(level=level, sort=sort).apply(
DataFrame.drop_duplicates)
expected = df
tm.assert_frame_equal(expected, result)
result = df.sort_index().groupby(level=level, sort=sort).apply(
DataFrame.drop_duplicates)
expected = df.sort_index()
tm.assert_frame_equal(expected, result)
def test_index_label_overlaps_location():
    # checking we don't have any label/location confusion in the
    # wake of GH5375
df = DataFrame(list('ABCDE'), index=[2, 0, 2, 1, 1])
g = df.groupby(list('ababb'))
actual = g.filter(lambda x: len(x) > 2)
expected = df.iloc[[1, 3, 4]]
assert_frame_equal(actual, expected)
ser = df[0]
g = ser.groupby(list('ababb'))
actual = g.filter(lambda x: len(x) > 2)
expected = ser.take([1, 3, 4])
assert_series_equal(actual, expected)
# ... and again, with a generic Index of floats
df.index = df.index.astype(float)
g = df.groupby(list('ababb'))
actual = g.filter(lambda x: len(x) > 2)
expected = df.iloc[[1, 3, 4]]
assert_frame_equal(actual, expected)
ser = df[0]
g = ser.groupby(list('ababb'))
actual = g.filter(lambda x: len(x) > 2)
expected = ser.take([1, 3, 4])
assert_series_equal(actual, expected)
def test_transform_doesnt_clobber_ints():
# GH 7972
n = 6
x = np.arange(n)
df = DataFrame({'a': x // 2, 'b': 2.0 * x, 'c': 3.0 * x})
df2 = DataFrame({'a': x // 2 * 1.0, 'b': 2.0 * x, 'c': 3.0 * x})
gb = df.groupby('a')
result = gb.transform('mean')
gb2 = df2.groupby('a')
expected = gb2.transform('mean')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('sort_column', ['ints', 'floats', 'strings',
['ints', 'floats'],
['ints', 'strings']])
@pytest.mark.parametrize('group_column', ['int_groups', 'string_groups',
['int_groups', 'string_groups']])
def test_groupby_preserves_sort(sort_column, group_column):
# Test to ensure that groupby always preserves sort order of original
# object. Issue #8588 and #9651
df = DataFrame(
{'int_groups': [3, 1, 0, 1, 0, 3, 3, 3],
'string_groups': ['z', 'a', 'z', 'a', 'a', 'g', 'g', 'g'],
'ints': [8, 7, 4, 5, 2, 9, 1, 1],
'floats': [2.3, 5.3, 6.2, -2.4, 2.2, 1.1, 1.1, 5],
'strings': ['z', 'd', 'a', 'e', 'word', 'word2', '42', '47']})
# Try sorting on different types and with different group types
df = df.sort_values(by=sort_column)
g = df.groupby(group_column)
def test_sort(x):
assert_frame_equal(x, x.sort_values(by=sort_column))
g.apply(test_sort)
def test_group_shift_with_null_key():
# This test is designed to replicate the segfault in issue #13813.
n_rows = 1200
# Generate a moderately large dataframe with occasional missing
# values in column `B`, and then group by [`A`, `B`]. This should
# force `-1` in `labels` array of `g.grouper.group_info` exactly
# at those places, where the group-by key is partially missing.
df = DataFrame([(i % 12, i % 3 if i % 3 else np.nan, i)
for i in range(n_rows)], dtype=float,
columns=["A", "B", "Z"], index=None)
g = df.groupby(["A", "B"])
expected = DataFrame([(i + 12 if i % 3 and i < n_rows - 12
else np.nan)
for i in range(n_rows)], dtype=float,
columns=["Z"], index=None)
result = g.shift(-1)
assert_frame_equal(result, expected)
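
# A quick way to inspect the ``-1`` sentinel labels described in the comment of
# the test above, assuming the same dataframe construction; this is a sketch for
# illustration and is not part of the suite's assertions.
def _show_null_key_labels():
    df = DataFrame([(i % 12, i % 3 if i % 3 else np.nan, i)
                    for i in range(24)], dtype=float,
                   columns=["A", "B", "Z"])
    labels, _, _ = df.groupby(["A", "B"]).grouper.group_info
    return labels  # rows where B is NaN carry the sentinel label -1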
def test_pivot_table_values_key_error():
# This test is designed to replicate the error in issue #14938
df = pd.DataFrame({'eventDate':
pd.date_range(pd.datetime.today(),
periods=20, freq='M').tolist(),
'thename': range(0, 20)})
df['year'] = df.set_index('eventDate').index.year
df['month'] = df.set_index('eventDate').index.month
with pytest.raises(KeyError):
df.reset_index().pivot_table(index='year', columns='month',
values='badname', aggfunc='count')
def test_empty_dataframe_groupby():
# GH8093
df = DataFrame(columns=['A', 'B', 'C'])
result = df.groupby('A').sum()
expected = DataFrame(columns=['B', 'C'], dtype=np.float64)
expected.index.name = 'A'
assert_frame_equal(result, expected)
def test_tuple_warns():
# https://github.com/pandas-dev/pandas/issues/18314
df = pd.DataFrame({('a', 'b'): [1, 1, 2, 2], 'a': [1, 1, 1, 2],
'b': [1, 2, 2, 2], 'c': [1, 1, 1, 1]})
with tm.assert_produces_warning(FutureWarning) as w:
df[['a', 'b', 'c']].groupby(('a', 'b')).c.mean()
assert "Interpreting tuple 'by' as a list" in str(w[0].message)
with tm.assert_produces_warning(None):
df.groupby(('a', 'b')).c.mean()
def test_tuple_warns_unhashable():
# https://github.com/pandas-dev/pandas/issues/18314
business_dates = date_range(start='4/1/2014', end='6/30/2014',
freq='B')
df = DataFrame(1, index=business_dates, columns=['a', 'b'])
with tm.assert_produces_warning(FutureWarning) as w:
df.groupby((df.index.year, df.index.month)).nth([0, 3, -1])
assert "Interpreting tuple 'by' as a list" in str(w[0].message)
def test_tuple_correct_keyerror():
# https://github.com/pandas-dev/pandas/issues/18798
df = pd.DataFrame(1, index=range(3),
columns=pd.MultiIndex.from_product([[1, 2],
[3, 4]]))
with tm.assert_raises_regex(KeyError, "(7, 8)"):
df.groupby((7, 8)).mean()
def test_groupby_agg_ohlc_non_first():
# GH 21716
df = pd.DataFrame([[1], [1]], columns=['foo'],
index=pd.date_range('2018-01-01', periods=2, freq='D'))
expected = pd.DataFrame([
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]
], columns=pd.MultiIndex.from_tuples((
('foo', 'ohlc', 'open'), ('foo', 'ohlc', 'high'),
('foo', 'ohlc', 'low'), ('foo', 'ohlc', 'close'),
('foo', 'sum', 'foo'))), index=pd.date_range(
'2018-01-01', periods=2, freq='D'))
result = df.groupby(pd.Grouper(freq='D')).agg(['sum', 'ohlc'])
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
droundy/deft | papers/water-saft/figs/density-compare.py | 1 | 1705 | #!/usr/bin/env python
#need this to run without xserver
import matplotlib
matplotlib.use('Agg')
import math
import matplotlib.pyplot as pyplot
import numpy
import pylab
from matplotlib.patches import Ellipse
nm = 18.8972613
gpermL=4.9388942e-3/0.996782051315 # conversion from atomic units to mass density
grey = '#999999'
blueish = '#99cccc'#'#aadddd' #'#55dae0'
rod = '#666666'
hugdata = pylab.loadtxt('figs/hughes-single-rod-1nm-density.dat')
rhug = hugdata[:, 0]/nm
hugdensity = hugdata[:, 1]/gpermL
p1, = pylab.plot(rhug, hugdensity, color = '#3333aa', linestyle='--')
newdata = pylab.loadtxt('figs/single-rod-1nm-density.dat')
rnew = newdata[:, 0]/nm
newdensity = newdata[:, 1]/gpermL
p2, = pylab.plot(rnew, newdensity, color = '#dd6677', linestyle='-')
pyplot.hlines(1, 0, 1.3, 'black', ':')
circleheight = 0.25
ymax = 3.1
rmax = 1.2
hardsphere_diameter = 3.0342/10 # nm
rod_radius = 0.25 # nm
pyplot.vlines([rod_radius - hardsphere_diameter/2], 0, ymax, rod, '-')
xpoints = [rod_radius + n*hardsphere_diameter for n in range(4)]
ypoints = [circleheight]*4
pyplot.plot(xpoints, ypoints, marker = 'o', color = 'black', linestyle = '')
fig = pyplot.gcf()
for n in range(4):
xpos = rod_radius + n*hardsphere_diameter
pyplot.vlines(xpos, 0, ymax, grey, ':')
fig.gca().add_artist(Ellipse((xpos, circleheight),
hardsphere_diameter, 1.2*hardsphere_diameter*ymax/rmax,
color = blueish, fill=False))
#plot properties
pyplot.ylabel('Density (g/mL)')
pyplot.xlabel('Radius (nm)')
pyplot.ylim(0, ymax)
pyplot.xlim(0, rmax)
pyplot.legend([p1, p2], ["Hughes, et al", "This work"])
pyplot.savefig('figs/density-compare.pdf')
| gpl-2.0 |
kirel/political-affiliation-prediction | partyprograms.py | 2 | 3428 | # -*- coding: utf-8 -*-
import re
import cPickle
from classifier import Classifier
import json
from scipy import ones,argmax
from sklearn.metrics import classification_report,confusion_matrix
def partyprograms(folder='model'):
clf = Classifier(folder=folder)
# converted with pdftotext
text = {}
bow = {}
# from https://www.spd.de/linkableblob/96686/data/20130415_regierungsprogramm_2013_2017.pdf
txt = open(folder+'/textdata/SPD_programm.txt').read()
# remove page footer
txt = re.sub(r'\W+Das Regierungsprogramm 2013 – 2017\W+\d+\W+','\n',txt)
# split in sections
txt = re.split('\n(IX|IV|V?I{0,3}\.\d? )',txt)
text['spd'] = txt
# from http://www.cdu.de/sites/default/files/media/dokumente/regierungsprogramm-2013-2017-langfassung-20130911.pdf
txt = open(folder+'/textdata/CDU_programm.txt').read()
# remove page footer
txt = re.sub(r'\W+Gemeinsam erfolgreich für Deutschland | Regierungsprogramm 2013 – 2017\W+','\n',txt)
# remove page numbers
txt = re.sub(r'\n\d+\n',' ',txt)
# get sections
txt = re.split(r'\n\d\.\d?\W',txt)
# remove sections without proper text
txt = [t for t in txt if len(t)>1000]
text['cdu'] = txt
# from https://www.die-linke.de/fileadmin/download/wahlen2013/bundestagswahlprogramm/bundestagswahlprogramm2013_langfassung.pdf
txt = open(folder+'/textdata/LINKE_programm.txt').read()
# remove page numbers
txt = re.sub(r'\n\d+\n',' ',txt)
# get sections
txt = re.split('\n\n+',txt)
# remove sections without proper text
txt = [t for t in txt if len(t)>1000]
text['linke'] = txt
# from http://www.gruene.de/fileadmin/user_upload/Dokumente/Wahlprogramm/Wahlprogramm-barrierefrei.pdf
txt = open(folder+'/textdata/GRUENE_programm.txt').read()
# remove page footer
txt = re.sub(r'(\d+)?\W+Bundestagswahlprogramm 2013\nBündnis 90/Die Grünen\W+\d?\n','\n',txt)
txt = re.sub(r'Teilhaben. Einmischen. Zukunft schaffen.','',txt)
txt = re.sub(r'Zeit für den grünen Wandel','',txt)
# remove page numbers
txt = re.sub(r'\n\d+\n',' ',txt)
# get sections
txt = re.split(r'\n\d\.\d?\W',txt)
# remove sections without proper text
txt = [t for t in txt if len(t)>1000]
text['gruene'] = txt
json.dump(text,open(folder+'/textdata/programs.json', 'wb'),ensure_ascii=False)
predictions,predictions_total = dict(),dict()
Ytrue, Yhat = [],[]
for key in text.keys():
predictions[key] = []
# for each paragraph separately
for paragraph in text[key]:
prediction = clf.predict(paragraph)['prediction']
idx = argmax([x['probability'] for x in prediction])
Yhat.append(text.keys().index(prediction[idx]['party']))
predictions[key].append(prediction)
#predictions[key] = map(lambda x: clf.predict(x)['prediction'],text[key])
# for the entire program at once
predictions_total[key] = clf.predict(' '.join(text[key]))['prediction']
Ytrue.extend(ones(len(text[key]))*text.keys().index(key))
print(confusion_matrix(Ytrue,Yhat))
print(classification_report(Ytrue,Yhat,target_names=text.keys()))
json.dump(predictions,open(folder+'/textdata/predictions.json','wb'),ensure_ascii=False)
json.dump(predictions_total,open(folder+'/textdata/predictions_total.json','wb'),ensure_ascii=False)
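
# A minimal, self-contained sketch of the section-splitting idea used on the SPD
# text above; only the regex is taken from the code, the sample string is
# invented for illustration.
def _split_sections_demo():
    sample = u"Vorwort\nI. Arbeit\nText zu Arbeit\nII. Europa\nText zu Europa"
    return re.split('\n(IX|IV|V?I{0,3}\.\d? )', sample)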
| mit |
GreenGear5/planet-wars | bots/alphabetaregular/alphabetaregular.py | 1 | 4321 | #!/usr/bin/env python
"""
A basic adaptive bot. This is part of the second worksheet.
"""
from api import State, util
import random, os
from sklearn.externals import joblib
DEFAULT_MODEL = os.path.dirname(os.path.realpath(__file__)) + '/alphabetaregular-model.pkl'
class Bot:
__max_depth = -1
__randomize = True
__model = None
def __init__(self, randomize=True, depth=4, model_file=DEFAULT_MODEL):
print(model_file)
self.__randomize = randomize
self.__max_depth = depth
# Load the model
self.__model = joblib.load(model_file)
def get_move(self, state):
val, move = self.value(state)
return move
def value(self, state, alpha=float('-inf'), beta=float('inf'), depth = 0):
"""
Return the value of this state and the associated move
:param state:
:param alpha: The highest score that the maximizing player can guarantee given current knowledge
:param beta: The lowest score that the minimizing player can guarantee given current knowledge
:param depth: How deep we are in the tree
:return: val, move: the value of the state, and the best move.
"""
if state.finished():
return (1.0, None) if state.winner() == 1 else (-1.0, None)
if depth == self.__max_depth:
return self.heuristic(state), None
best_value = float('-inf') if maximizing(state) else float('inf')
best_move = None
moves = state.moves()
if self.__randomize:
random.shuffle(moves)
for move in moves:
next_state = state.next(move)
value, m = self.value(next_state, alpha, beta, depth + 1)
if maximizing(state):
if value > best_value:
best_value = value
best_move = move
alpha = best_value
else:
if value < best_value:
best_value = value
best_move = move
beta = best_value
# Prune the search tree
# We know this state will never be chosen, so we stop evaluating its children
            if alpha >= beta:
break
return best_value, best_move
def heuristic(self, state):
# Convert the state to a feature vector
feature_vector = [features(state)]
# These are the classes: ('won', 'lost')
classes = list(self.__model.classes_)
# Ask the model for a prediction
# This returns a probability for each class
prob = self.__model.predict_proba(feature_vector)[0]
# print('{} {} {}'.format(classes, prob, util.ratio_ships(state, 1)))
# Weigh the win/loss outcomes (-1 and 1) by their probabilities
res = -1.0 * prob[classes.index('lost')] + 1.0 * prob[classes.index('won')]
# print(res)
return res
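
# The probability weighting in heuristic() reduces to an expected outcome in the
# range [-1, 1]; a tiny standalone sketch of that arithmetic (the numbers below
# are made up for illustration):
def _expected_outcome(prob_lost, prob_won):
    # -1 for a predicted loss, +1 for a predicted win, weighted by probability
    return -1.0 * prob_lost + 1.0 * prob_won
# e.g. _expected_outcome(0.25, 0.75) == 0.5, a state the model expects to win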
def maximizing(state):
"""
Whether we're the maximizing player (1) or the minimizing player (2).
:param state:
:return:
"""
return state.whose_turn() == 1
def features(state):
# type: (State) -> tuple[float, ...]
"""
Extract features from this state. Remember that every feature vector returned should have the same length.
:param state: A state to be converted to a feature vector
:return: A tuple of floats: a feature vector representing this state.
"""
my_id = state.whose_turn()
    opponent_id = 2 if my_id == 1 else 1
# How many ships does p1 have in garrisons?
p1_garrisons = 0.0
# How many ships does p2 have in garrisons?
p2_garrisons = 0.0
p1_planets = 0
p2_planets = 0
for planet in state.planets(my_id):
p1_garrisons += state.garrison(planet)
p1_planets += 1
for planet in state.planets(opponent_id):
p2_garrisons += state.garrison(planet)
p2_planets += 1
# How many ships does p1 have in fleets?
p1_fleets = 0.0
# How many ships does p2 have in fleets?
p2_fleets = 0.0
for fleet in state.fleets():
if fleet.owner() == my_id:
            p1_fleets += fleet.size()
else:
p2_fleets += fleet.size()
return p1_garrisons, p2_garrisons, p1_fleets, p2_fleets, p1_planets, p2_planets
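
# A rough sketch of how a model file such as DEFAULT_MODEL could be produced,
# assuming feature vectors built with features() and one 'won'/'lost' label per
# recorded game; the real training script is not part of this file, so the data
# loading and estimator choice here are placeholders.
def _train_model_sketch(feature_vectors, labels, out_file='alphabetaregular-model.pkl'):
    from sklearn.linear_model import LogisticRegression
    model = LogisticRegression()
    model.fit(feature_vectors, labels)  # labels are the strings 'won' / 'lost'
    joblib.dump(model, out_file)
    return model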
| mit |
vybstat/scikit-learn | examples/linear_model/plot_robust_fit.py | 238 | 2414 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The median absolute deviation on non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in the X and y directions, but has
  a breakdown point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling errors only', X, y),
('Corrupt X, small deviants', X_errors, y),
('Corrupt y, small deviants', X, y_errors),
('Corrupt X, large deviants', X_errors_large, y),
('Corrupt y, large deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'k+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot,
label='%s: error = %.3f' % (name, mse))
plt.legend(loc='best', frameon=False,
title='Mean squared error\n on non-corrupt data')
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
alexeyum/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 4 | 26200 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
from sklearn.exceptions import DataConversionWarning
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
# test that we convert to boolean arrays for boolean distances
rng = np.random.RandomState(0)
X = rng.randn(5, 4)
Y = X.copy()
Y[0, 0] = 1 - Y[0, 0]
for metric in PAIRWISE_BOOLEAN_FUNCTIONS:
for Z in [Y, None]:
res = pairwise_distances(X, Z, metric=metric)
res[np.isnan(res)] = 0
assert_true(np.sum(res != 0) == 0)
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
# even if shape[1] agrees (although this second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1.]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# pairwise_distances should allow a callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1.]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
"chi2", "additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {'gamma': 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
# Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = laplacian_kernel(X, X)
# the diagonal elements of a laplacian kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(5))
# off-diagonal elements are < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
# Test that the cosine kernel is equal to a linear kernel when the data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
# converted to 2D anyway
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
jjs0sbw/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/table.py | 3 | 16868 | """
Place a table below the x-axis at location loc.
The table consists of a grid of cells.
The grid need not be rectangular and can have holes.
Cells are added by specifying their row and column.
For the purposes of positioning the cell at (0, 0) is
assumed to be at the top left and the cell at (max_row, max_col)
is assumed to be at bottom right.
You can add additional cells outside this range to have convenient
ways of positioning more interesting grids.
Author : John Gill <[email protected]>
Copyright : 2004 John Gill and John Hunter
License : matplotlib license
"""
from __future__ import division
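# Minimal usage sketch (added for illustration, not part of the original
# module; it assumes the standard matplotlib pyplot API together with the
# table() factory function defined at the bottom of this file):
#
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   tab = table(ax,
#               cellText=[['1', '2'], ['3', '4']],
#               rowLabels=['row 1', 'row 2'],
#               colLabels=['col 1', 'col 2'],
#               loc='bottom')
#   plt.show()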
import warnings
import artist
from artist import Artist, allow_rasterization
from patches import Rectangle
from cbook import is_string_like
from matplotlib import docstring
from text import Text
from transforms import Bbox
class Cell(Rectangle):
"""
A cell is a Rectangle with some associated text.
"""
PAD = 0.1 # padding between text and rectangle
def __init__(self, xy, width, height,
edgecolor='k', facecolor='w',
fill=True,
text='',
loc=None,
fontproperties=None
):
# Call base
Rectangle.__init__(self, xy, width=width, height=height,
edgecolor=edgecolor, facecolor=facecolor,
)
self.set_clip_on(False)
# Create text object
if loc is None: loc = 'right'
self._loc = loc
self._text = Text(x=xy[0], y=xy[1], text=text,
fontproperties=fontproperties)
self._text.set_clip_on(False)
def set_transform(self, trans):
Rectangle.set_transform(self, trans)
# the text does not get the transform!
def set_figure(self, fig):
Rectangle.set_figure(self, fig)
self._text.set_figure(fig)
def get_text(self):
'Return the cell Text instance'
return self._text
def set_fontsize(self, size):
self._text.set_fontsize(size)
def get_fontsize(self):
'Return the cell fontsize'
return self._text.get_fontsize()
def auto_set_font_size(self, renderer):
""" Shrink font size until text fits. """
fontsize = self.get_fontsize()
required = self.get_required_width(renderer)
while fontsize > 1 and required > self.get_width():
fontsize -= 1
self.set_fontsize(fontsize)
required = self.get_required_width(renderer)
return fontsize
@allow_rasterization
def draw(self, renderer):
if not self.get_visible(): return
# draw the rectangle
Rectangle.draw(self, renderer)
# position the text
self._set_text_position(renderer)
self._text.draw(renderer)
def _set_text_position(self, renderer):
""" Set text up so it draws in the right place.
Currently supports 'left', 'center' and 'right'
"""
bbox = self.get_window_extent(renderer)
l, b, w, h = bbox.bounds
# draw in center vertically
self._text.set_verticalalignment('center')
y = b + (h / 2.0)
# now position horizontally
if self._loc == 'center':
self._text.set_horizontalalignment('center')
x = l + (w / 2.0)
elif self._loc == 'left':
self._text.set_horizontalalignment('left')
x = l + (w * self.PAD)
else:
self._text.set_horizontalalignment('right')
x = l + (w * (1.0 - self.PAD))
self._text.set_position((x, y))
def get_text_bounds(self, renderer):
""" Get text bounds in axes co-ordinates. """
bbox = self._text.get_window_extent(renderer)
bboxa = bbox.inverse_transformed(self.get_data_transform())
return bboxa.bounds
def get_required_width(self, renderer):
""" Get width required for this cell. """
l,b,w,h = self.get_text_bounds(renderer)
return w * (1.0 + (2.0 * self.PAD))
def set_text_props(self, **kwargs):
'update the text properties with kwargs'
self._text.update(kwargs)
class Table(Artist):
"""
Create a table of cells.
Table can have (optional) row and column headers.
Each entry in the table can be either text or patches.
Column widths and row heights for the table can be specified.
Return value is a sequence of text, line and patch instances that make
up the table
"""
codes = {'best' : 0,
'upper right' : 1, # default
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'center left' : 5,
'center right' : 6,
'lower center' : 7,
'upper center' : 8,
'center' : 9,
'top right' : 10,
'top left' : 11,
'bottom left' : 12,
'bottom right' : 13,
'right' : 14,
'left' : 15,
'top' : 16,
'bottom' : 17,
}
FONTSIZE = 10
AXESPAD = 0.02 # the border between the axes and table edge
def __init__(self, ax, loc=None, bbox=None):
Artist.__init__(self)
if is_string_like(loc) and loc not in self.codes:
warnings.warn('Unrecognized location %s. Falling back on bottom; valid locations are\n%s\t' %(loc, '\n\t'.join(self.codes.keys())))
loc = 'bottom'
if is_string_like(loc): loc = self.codes.get(loc, 1)
self.set_figure(ax.figure)
self._axes = ax
self._loc = loc
self._bbox = bbox
# use axes coords
self.set_transform(ax.transAxes)
self._texts = []
self._cells = {}
self._autoRows = []
self._autoColumns = []
self._autoFontsize = True
self._cachedRenderer = None
def add_cell(self, row, col, *args, **kwargs):
""" Add a cell to the table. """
xy = (0,0)
cell = Cell(xy, *args, **kwargs)
cell.set_figure(self.figure)
cell.set_transform(self.get_transform())
cell.set_clip_on(False)
self._cells[(row, col)] = cell
def _approx_text_height(self):
return self.FONTSIZE/72.0*self.figure.dpi/self._axes.bbox.height * 1.2
@allow_rasterization
def draw(self, renderer):
# Need a renderer to do hit tests on mouseevent; assume the last one will do
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
self._cachedRenderer = renderer
if not self.get_visible(): return
renderer.open_group('table')
self._update_positions(renderer)
keys = self._cells.keys()
keys.sort()
for key in keys:
self._cells[key].draw(renderer)
#for c in self._cells.itervalues():
# c.draw(renderer)
renderer.close_group('table')
def _get_grid_bbox(self, renderer):
"""Get a bbox, in axes co-ordinates for the cells.
Only include those in the range (0,0) to (maxRow, maxCol)"""
boxes = [self._cells[pos].get_window_extent(renderer)
for pos in self._cells.keys()
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.inverse_transformed(self.get_transform())
def contains(self,mouseevent):
"""Test whether the mouse event occurred in the table.
Returns T/F, {}
"""
if callable(self._contains): return self._contains(self,mouseevent)
# TODO: Return index of the cell containing the cursor so that the user
# doesn't have to bind to each one individually.
if self._cachedRenderer is not None:
boxes = [self._cells[pos].get_window_extent(self._cachedRenderer)
for pos in self._cells.keys()
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.contains(mouseevent.x,mouseevent.y),{}
else:
return False,{}
def get_children(self):
'Return the Artists contained by the table'
return self._cells.values()
get_child_artists = get_children # backward compatibility
def get_window_extent(self, renderer):
'Return the bounding box of the table in window coords'
boxes = [c.get_window_extent(renderer) for c in self._cells.itervalues()]
return Bbox.union(boxes)
def _do_cell_alignment(self):
""" Calculate row heights and column widths.
Position cells accordingly.
"""
# Calculate row/column widths
widths = {}
heights = {}
for (row, col), cell in self._cells.iteritems():
height = heights.setdefault(row, 0.0)
heights[row] = max(height, cell.get_height())
width = widths.setdefault(col, 0.0)
widths[col] = max(width, cell.get_width())
# work out left position for each column
xpos = 0
lefts = {}
cols = widths.keys()
cols.sort()
for col in cols:
lefts[col] = xpos
xpos += widths[col]
ypos = 0
bottoms = {}
rows = heights.keys()
rows.sort()
rows.reverse()
for row in rows:
bottoms[row] = ypos
ypos += heights[row]
# set cell positions
for (row, col), cell in self._cells.iteritems():
cell.set_x(lefts[col])
cell.set_y(bottoms[row])
def auto_set_column_width(self, col):
self._autoColumns.append(col)
def _auto_set_column_width(self, col, renderer):
""" Automagically set width for column.
"""
cells = [key for key in self._cells if key[1] == col]
# find max width
width = 0
for cell in cells:
c = self._cells[cell]
width = max(c.get_required_width(renderer), width)
# Now set the widths
for cell in cells:
self._cells[cell].set_width(width)
def auto_set_font_size(self, value=True):
""" Automatically set font size. """
self._autoFontsize = value
def _auto_set_font_size(self, renderer):
if len(self._cells) == 0:
return
fontsize = self._cells.values()[0].get_fontsize()
cells = []
for key, cell in self._cells.iteritems():
# ignore auto-sized columns
if key[1] in self._autoColumns: continue
size = cell.auto_set_font_size(renderer)
fontsize = min(fontsize, size)
cells.append(cell)
# now set all fontsizes equal
for cell in self._cells.itervalues():
cell.set_fontsize(fontsize)
def scale(self, xscale, yscale):
""" Scale column widths by xscale and row heights by yscale. """
for c in self._cells.itervalues():
c.set_width(c.get_width() * xscale)
c.set_height(c.get_height() * yscale)
def set_fontsize(self, size):
"""
Set the fontsize of the cell text
ACCEPTS: a float in points
"""
for cell in self._cells.itervalues():
cell.set_fontsize(size)
def _offset(self, ox, oy):
'Move all the artists by ox,oy (axes coords)'
for c in self._cells.itervalues():
x, y = c.get_x(), c.get_y()
c.set_x(x+ox)
c.set_y(y+oy)
def _update_positions(self, renderer):
# called from renderer to allow more precise estimates of
# widths and heights with get_window_extent
# Do any auto width setting
for col in self._autoColumns:
self._auto_set_column_width(col, renderer)
if self._autoFontsize:
self._auto_set_font_size(renderer)
# Align all the cells
self._do_cell_alignment()
bbox = self._get_grid_bbox(renderer)
l,b,w,h = bbox.bounds
if self._bbox is not None:
# Position according to bbox
rl, rb, rw, rh = self._bbox
self.scale(rw/w, rh/h)
ox = rl - l
oy = rb - b
self._do_cell_alignment()
else:
# Position using loc
(BEST, UR, UL, LL, LR, CL, CR, LC, UC, C,
TR, TL, BL, BR, R, L, T, B) = range(len(self.codes))
# defaults for center
ox = (0.5-w/2)-l
oy = (0.5-h/2)-b
if self._loc in (UL, LL, CL): # left
ox = self.AXESPAD - l
if self._loc in (BEST, UR, LR, R, CR): # right
ox = 1 - (l + w + self.AXESPAD)
if self._loc in (BEST, UR, UL, UC): # upper
oy = 1 - (b + h + self.AXESPAD)
if self._loc in (LL, LR, LC): # lower
oy = self.AXESPAD - b
if self._loc in (LC, UC, C): # center x
ox = (0.5-w/2)-l
if self._loc in (CL, CR, C): # center y
oy = (0.5-h/2)-b
if self._loc in (TL, BL, L): # out left
ox = - (l + w)
if self._loc in (TR, BR, R): # out right
ox = 1.0 - l
if self._loc in (TR, TL, T): # out top
oy = 1.0 - b
if self._loc in (BL, BR, B): # out bottom
oy = - (b + h)
self._offset(ox, oy)
def get_celld(self):
'return a dict of cells in the table'
return self._cells
def table(ax,
cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None):
"""
TABLE(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None)
Factory function to generate a Table instance.
Thanks to John Gill for providing the class and table.
"""
# Check we have some cellText
if cellText is None:
# assume just colours are needed
rows = len(cellColours)
cols = len(cellColours[0])
cellText = [[''] * cols] * rows
rows = len(cellText)
cols = len(cellText[0])
for row in cellText:
assert len(row) == cols
if cellColours is not None:
assert len(cellColours) == rows
for row in cellColours:
assert len(row) == cols
else:
cellColours = ['w' * cols] * rows
# Set colwidths if not given
if colWidths is None:
colWidths = [1.0/cols] * cols
# Check row and column labels
rowLabelWidth = 0
if rowLabels is None:
if rowColours is not None:
rowLabels = [''] * rows
rowLabelWidth = colWidths[0]
elif rowColours is None:
rowColours = 'w' * rows
if rowLabels is not None:
assert len(rowLabels) == rows
offset = 0
if colLabels is None:
if colColours is not None:
colLabels = [''] * cols
offset = 1
elif colColours is None:
colColours = 'w' * cols
offset = 1
if colLabels is not None:
assert len(colLabels) == cols
# Set up cell colours if not given
if cellColours is None:
cellColours = ['w' * cols] * rows
# Now create the table
table = Table(ax, loc, bbox)
height = table._approx_text_height()
# Add the cells
for row in xrange(rows):
for col in xrange(cols):
table.add_cell(row+offset, col,
width=colWidths[col], height=height,
text=cellText[row][col],
facecolor=cellColours[row][col],
loc=cellLoc)
# Do column labels
if colLabels is not None:
for col in xrange(cols):
table.add_cell(0, col,
width=colWidths[col], height=height,
text=colLabels[col], facecolor=colColours[col],
loc=colLoc)
# Do row labels
if rowLabels is not None:
for row in xrange(rows):
table.add_cell(row+offset, -1,
width=rowLabelWidth or 1e-15, height=height,
text=rowLabels[row], facecolor=rowColours[row],
loc=rowLoc)
if rowLabelWidth == 0:
table.auto_set_column_width(-1)
ax.add_table(table)
return table
docstring.interpd.update(Table=artist.kwdoc(Table))
| gpl-3.0 |
beepee14/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 181 | 15664 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be centered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
deepakantony/sms-tools | lectures/08-Sound-transformations/plots-code/sineModelTimeScale-functions.py | 24 | 2725 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
from scipy.fftpack import fft, ifft, fftshift
import sys, os, functools, time, math
from scipy.interpolate import interp1d
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import sineModel as SM
import stft as STFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/mridangam.wav')
x1 = x[:int(1.49*fs)]
w = np.hamming(801)
N = 2048
t = -90
minSineDur = .005
maxnSines = 150
freqDevOffset = 20
freqDevSlope = 0.02
Ns = 512
H = Ns/4
sfreq, smag, sphase = SM.sineModelAnal(x1, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
timeScale = np.array([.01, .0, .03, .03, .335, .8, .355, .82, .671, 1.0, .691, 1.02, .858, 1.1, .878, 1.12, 1.185, 1.8, 1.205, 1.82, 1.49, 2.0])
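# How to read timeScale (comment added for clarity, based on the mapping code
# below): values at even indices are input times in seconds, each paired with
# the output time that follows it, e.g. the pair (.335, .8) makes the event at
# 0.335 s of the input occur at 0.8 s of the output.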
L = sfreq[:,0].size # number of input frames
maxInTime = max(timeScale[::2]) # maximum value used as input times
maxOutTime = max(timeScale[1::2]) # maximum value used in output times
outL = int(L*maxOutTime/maxInTime) # number of output frames
inFrames = L*timeScale[::2]/maxInTime # input time values in frames
outFrames = outL*timeScale[1::2]/maxOutTime # output time values in frames
timeScalingEnv = interp1d(outFrames, inFrames, fill_value=0) # interpolation function
indexes = timeScalingEnv(np.arange(outL)) # generate frame indexes for the output
ysfreq = sfreq[round(indexes[0]),:] # first output frame
ysmag = smag[round(indexes[0]),:] # first output frame
for l in indexes[1:]: # generate frames for output sine tracks
ysfreq = np.vstack((ysfreq, sfreq[round(l),:]))
ysmag = np.vstack((ysmag, smag[round(l),:]))
mag1 = np.sum(10**(smag/20), axis=1)
mag2 = np.sum(10**(ysmag/20), axis=1)
mag1 = 20*np.log10(mag1)
mag2 = 20*np.log10(mag2)
plt.figure(1, figsize=(9, 7))
maxplotfreq = 4000.0
plt.subplot(3,1,1)
plt.plot(H*indexes/float(fs), H*np.arange(outL)/float(fs), color='k', lw=1.5)
plt.autoscale(tight=True)
plt.xlabel('input times')
plt.ylabel('output times')
plt.title('output scaling')
plt.subplot(3,1,2)
plt.plot(H*np.arange(mag1.size)/float(fs), mag1, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('input magnitude sines')
plt.subplot(3,1,3)
plt.plot(H*np.arange(mag2.size)/float(fs), mag2, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('output magnitude sines')
plt.tight_layout()
plt.savefig('sineModelTimeScale-functions.png')
plt.show()
| agpl-3.0 |
Pegasus99/Kaggle | bag of words meets bags of popcorn/using bag of words and random forest/script.py | 1 | 1831 | import pandas as pd
from sklearn.ensemble import RandomForestClassifier as rand_forest
from bs4 import BeautifulSoup
import numpy as np
import re
import nltk
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
def clean_word (word):
soup_obj = BeautifulSoup(word)
kein_html = soup_obj.get_text()
nur_buchstaben = re.sub ("[^a-zA-Z]"," ",kein_html)
lower_case =nur_buchstaben.lower()
words =lower_case.split()
stop_words =set (stopwords.words("english"))
words = [ w for w in words if not w in stop_words ]
return (" ". join(words) )
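# Illustrative example of the cleaning pipeline above (assuming the default
# NLTK English stopword list):
# clean_word("<b>This movie was GREAT!!</b>") -> "movie great"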
def clean_reviews (reviews):
num_rev = len (reviews)
clean_review=[]
for i in xrange(0,num_rev):
if ( (i+1) %10000 ==0):
print ("now reached %d from %d "% (i,num_rev) )
clean_review.append( clean_word(reviews[i]) )
return clean_review
train_set = pd.read_csv("labeledTrainData.tsv",header=0 ,delimiter="\t",quoting=3)
clean_train_reviews=clean_reviews((train_set["review"]))
vectorizer = CountVectorizer(max_features=1000 )
train_data_features = vectorizer.fit_transform(clean_train_reviews)
train_data_features = train_data_features.toarray()
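# Descriptive note (added): after toarray() this is a dense
# (number of reviews) x 1000 matrix of raw term counts, one column per word in
# the vocabulary learned by the CountVectorizer above; the exact vocabulary
# depends on the training data.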
forest =rand_forest(n_estimators=100)
print("now to training")
forest =forest.fit(train_data_features,train_set["sentiment"] )
print("training is finished ")
test_set=pd.read_csv("testData.tsv",header=0 ,delimiter="\t",quoting=3)
clean_test_reviews = clean_reviews((test_set["review"]))
test_features = vectorizer.transform(clean_test_reviews)
clean_test_reviews=None
clean_train_reviews=None
train_set=None
test_features =test_features.toarray()
res = forest.predict(test_features)
output =pd.DataFrame(data={"id":test_set["id"],"sentiment":res})
output.to_csv("Pegasus_review.csv",index=False ,quoting=3)
| mit |
ishank08/scikit-learn | sklearn/cross_decomposition/tests/test_pls.py | 42 | 14294 | import numpy as np
from sklearn.utils.testing import (assert_equal, assert_array_almost_equal,
assert_array_equal, assert_true,
assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_, CCA
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
pls_bysvd.x_loadings_, decimal=5,
err_msg="nipals and svd implementations lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
pls_bysvd.y_loadings_, decimal=5,
err_msg="nipals and svd implementations lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
# x_weights_sign_flip holds columns of 1 or -1, depending on sign flip
# between R and python
x_weights_sign_flip = pls_ca.x_weights_ / x_weights
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
x_rotations_sign_flip = pls_ca.x_rotations_ / x_rotations
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
y_weights_sign_flip = pls_ca.y_weights_ / y_weights
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
y_rotations_sign_flip = pls_ca.y_rotations_ / y_rotations
# x_weights = X.dot(x_rotation)
# Hence R/python sign flip should be the same in x_weight and x_rotation
assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip)
    # This tests that R / python give the same result up to column
# sign indeterminacy
assert_array_almost_equal(np.abs(x_rotations_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip)
assert_array_almost_equal(np.abs(y_rotations_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
    # The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
x_weights_sign_flip = pls_2.x_weights_ / x_weights
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
x_loadings_sign_flip = pls_2.x_loadings_ / x_loadings
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
y_weights_sign_flip = pls_2.y_weights_ / y_weights
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
y_loadings_sign_flip = pls_2.y_loadings_ / y_loadings
# x_loadings[:, i] = Xi.dot(x_weights[:, i]) \forall i
assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(x_loadings_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(y_loadings_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
x_weights_sign_flip = pls_ca.x_weights_ / x_weights
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
x_loadings_sign_flip = pls_ca.x_loadings_ / x_loadings
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
y_weights_sign_flip = pls_ca.y_weights_ / y_weights
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
y_loadings_sign_flip = pls_ca.y_loadings_ / y_loadings
assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_loadings_sign_flip), 1, 4)
assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_loadings_sign_flip), 1, 4)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
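# A minimal standalone sketch of the X = TP' check exercised above, assuming only the
# linnerud data and PLSCanonical already used in this test (illustrative, not part of
# the test suite):
#
#   d = load_linnerud()
#   plsca = pls_.PLSCanonical(n_components=d.data.shape[1]).fit(d.data, d.target)
#   Xc, Yc, _, _, _, _ = pls_._center_scale_xy(d.data.copy(), d.target.copy(), scale=True)
#   np.allclose(Xc, np.dot(plsca.x_scores_, plsca.x_loadings_.T))  # expected True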
def test_PLSSVD():
    # Let's check that PLSSVD doesn't return all possible components but just
# the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
# check that copy doesn't destroy
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale_and_stability():
# We test scale=True parameter
    # This also allows us to check numerical stability across platforms
d = load_linnerud()
X1 = d.data
Y1 = d.target
# causes X[:, -1].std() to be zero
X1[:, -1] = 1.0
# From bug #2821
# Test with X2, T2 s.t. clf.x_score[:, 1] == 0, clf.y_score[:, 1] == 0
# This test robustness of algorithm when dealing with value close to 0
X2 = np.array([[0., 0., 1.],
[1., 0., 0.],
[2., 2., 2.],
[3., 5., 4.]])
Y2 = np.array([[0.1, -0.2],
[0.9, 1.1],
[6.2, 5.9],
[11.9, 12.3]])
for (X, Y) in [(X1, Y1), (X2, Y2)]:
X_std = X.std(axis=0, ddof=1)
X_std[X_std == 0] = 1
Y_std = Y.std(axis=0, ddof=1)
Y_std[Y_std == 0] = 1
X_s = (X - X.mean(axis=0)) / X_std
Y_s = (Y - Y.mean(axis=0)) / Y_std
for clf in [CCA(), pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
X_score, Y_score = clf.fit_transform(X, Y)
clf.set_params(scale=False)
X_s_score, Y_s_score = clf.fit_transform(X_s, Y_s)
assert_array_almost_equal(X_s_score, X_score)
assert_array_almost_equal(Y_s_score, Y_score)
# Scaling should be idempotent
clf.set_params(scale=True)
X_score, Y_score = clf.fit_transform(X_s, Y_s)
assert_array_almost_equal(X_s_score, X_score)
assert_array_almost_equal(Y_s_score, Y_score)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
| bsd-3-clause |
hansehe/Wind-Blade-Inspection | src/DroneVision/DroneVision_src/imgProcessing/featureDetection/generalDetectors/detectLines.py | 1 | 16807 | '''
Author: Hans Erik Heggem
Email: [email protected]
Project: Master's Thesis - Autonomous Inspection Of Wind Blades
Repository: Master's Thesis - CV (Computer Vision)
'''
import math, cv2, operator
import numpy as np
from Settings.Exceptions import DroneVisionError
from src.DroneVision.DroneVision_src.imgProcessing.frameTools.frameTools import CheckGrayScale, CheckColor, GetShape
from src.DroneVision.DroneVision_src.hardware.imageTools import MatplotShow
def FindLineLimits(frame, hough_lines, keypoints, radi_threshold=None, radi_threshold_tuning_param=2.0, draw_hough_matrix=False, draw_bounded_lines=False, draw_max_min_lines=False, draw_arrowed_bounded_lines=True):
'''
@brief Find boundaries for all hough lines according to the point map.
Segments an object based on the point list by limiting the hough lines and detecting the boundary lines.
@param frame (grayscale)
@param hough_lines List of hough lines (rho, theta).
@param keypoints List of detected point positions.
@param radi_threshold Threshold in pixels to search for in vertical and horizontal axis (If none, then it is set to the biggest blob size)
	@param radi_threshold_tuning_param Threshold tuning parameter (default=2 when using biggest blob size. < 0.5 is recommended when using the distance between blobs).
@param draw_hough_matrix Draw hough lines matrix.
@param draw_bounded_lines Draw bounded lines.
@param draw_max_min_lines Draw detected max min lines.
@param draw_arrowed_bounded_lines
	@return frame, bounded_lines, max_min_lines
frame - Usually grayscale, but rgb frame if any draw flag == True
bounded_lines - list with vertical and horizontal lines.
bounded_lines[0] = horizontal lines list, bounded_lines[1] = vertical lines list,
where each index describing a line as ((x1,y1), (x2,y2))
max_min_lines - List of max and min horizontal and vertical lines, in this order:
max_hor_line - Bottom boundary line on the horizontal axis (Note: bottom = max index in images) as ((x1,y1), (x2,y2))
min_hor_line - Top boundary line on the horizontal axis (Note: top = min index in images) as ((x1,y1), (x2,y2))
max_vert_line - Right most boundary line on the vertical axis as ((x1,y1), (x2,y2))
min_vert_line - Left most boundary line on the horizontal axis as ((x1,y1), (x2,y2))
'''
x_points = np.zeros(len(keypoints)) #Width
y_points = np.zeros(len(keypoints)) #height
biggest_size = 0.0
for i in range(len(keypoints)):
if keypoints[i].size > biggest_size:
biggest_size = keypoints[i].size
x_points[i] = keypoints[i].pt[0]
y_points[i] = keypoints[i].pt[1]
# Tuning variable - search along vertical or horizontal axis is limited to this radius.
if radi_threshold == None:
radi_threshold = biggest_size
radi_threshold *= radi_threshold_tuning_param
min_vert_points = [[],[]]
max_vert_points = [[],[]]
min_hor_points = [[],[]]
max_hor_points = [[],[]]
bounded_lines = [[],[]]
width, height = GetShape(frame)
size = math.sqrt(math.pow(width, 2.0) + math.pow(height, 2.0))
max_vert_line_y_pos = -size*10 # Set invalid 'large' values to begin with
min_vert_line_y_pos = size*10
max_hor_line_x_pos = -size*10
min_hor_line_x_pos = size*10
if draw_hough_matrix:
frame = DrawHoughLines(frame, hough_lines)
for rho, theta in hough_lines:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
if round(np.rad2deg(a)) == 0: # Horizontal lines - search y_points
y_m_above = np.argwhere(y_points >= y0 - radi_threshold).T[0]
y_m_below = np.argwhere(y_points < y0 + radi_threshold).T[0]
y_m_ab_b = np.in1d(y_m_above, y_m_below)
y_m_be_b = np.in1d(y_m_below, y_m_above)
y_m_ab_in = np.argwhere(y_m_ab_b == True).T[0]
y_m_be_in = np.argwhere(y_m_be_b == True).T[0]
y_m_temp = np.zeros(len(y_m_ab_in) + len(y_m_be_in))
len_ab = len(y_m_ab_in)
for i in range(len(y_m_temp)):
if i < len_ab:
y_m_temp[i] = y_m_above[y_m_ab_in[i]]
else:
y_m_temp[i] = y_m_below[y_m_be_in[i-len_ab]]
y_m = np.unique(y_m_temp)
if len(y_m) < 2: # Ignore lines of only a single point.
continue
y_dist = {}
for y in y_m:
y = int(y)
y_dist[x_points[y]] = y_points[y]
sorted_y = sorted(y_dist.items(), key=operator.itemgetter(0))
min_point = sorted_y[0]
max_point = sorted_y[-1:][0]
xp = np.zeros(len(y_dist))
fp = np.zeros(len(y_dist))
i = 0
for x, y in sorted_y:
xp[i] = x
fp[i] = y
i += 1
y_inter = np.interp([min_point[0], max_point[0]], xp, fp)
x1 = int(round(min_point[0]))
y1 = int(round(y_inter[0]))
x2 = int(round(max_point[0]))
y2 = int(round(y_inter[1]))
min_hor_points[0].append(min_point[0]) #x
min_hor_points[1].append(y_inter[0]) #y
max_hor_points[0].append(max_point[0]) #x
max_hor_points[1].append(y_inter[1]) #y
average_line_x_pos = y_inter[0] + (y_inter[1] - y_inter[0])/2
if average_line_x_pos > max_hor_line_x_pos:
max_hor_line_x_pos = average_line_x_pos
max_hor_line = ((x1,y1), (x2,y2))
if average_line_x_pos < min_hor_line_x_pos:
min_hor_line_x_pos = average_line_x_pos
min_hor_line = ((x1,y1), (x2,y2))
bounded_lines[0].append(((x1,y1), (x2,y2)))
elif round(np.rad2deg(b)) == 0: # vertical lines - search x_points
x_m_above = np.argwhere(x_points >= x0 - radi_threshold).T[0]
x_m_below = np.argwhere(x_points < x0 + radi_threshold).T[0]
x_m_ab_b = np.in1d(x_m_above, x_m_below)
x_m_be_b = np.in1d(x_m_below, x_m_above)
x_m_ab_in = np.argwhere(x_m_ab_b == True).T[0]
x_m_be_in = np.argwhere(x_m_be_b == True).T[0]
x_m_temp = np.zeros(len(x_m_ab_in) + len(x_m_be_in))
len_ab = len(x_m_ab_in)
for i in range(len(x_m_temp)):
if i < len_ab:
x_m_temp[i] = x_m_above[x_m_ab_in[i]]
else:
x_m_temp[i] = x_m_below[x_m_be_in[i-len_ab]]
x_m = np.unique(x_m_temp)
if len(x_m) < 2: # Ignore lines of only a single point.
continue
x_dist = {}
for x in x_m:
x = int(x)
x_dist[y_points[x]] = x_points[x]
sorted_x = sorted(x_dist.items(), key=operator.itemgetter(0))
min_point = sorted_x[0]
max_point = sorted_x[-1:][0]
xp = np.zeros(len(x_dist))
fp = np.zeros(len(x_dist))
i = 0
for x, y in sorted_x:
xp[i] = x
fp[i] = y
i += 1
x_inter = np.interp([min_point[1], max_point[1]], xp, fp)
x1 = int(round(x_inter[0]))
y1 = int(round(min_point[0]))
x2 = int(round(x_inter[1]))
y2 = int(round(max_point[0]))
min_vert_points[0].append(x_inter[0]) #x
min_vert_points[1].append(min_point[0]) #y
max_vert_points[0].append(x_inter[1]) #x
max_vert_points[1].append(max_point[0]) #y
average_line_y_pos = x_inter[0] + (x_inter[1] - x_inter[0])/2
if average_line_y_pos > max_vert_line_y_pos:
max_vert_line_y_pos = average_line_y_pos
max_vert_line = ((x1,y1), (x2,y2))
if average_line_y_pos < min_vert_line_y_pos:
min_vert_line_y_pos = average_line_y_pos
min_vert_line = ((x1,y1), (x2,y2))
bounded_lines[1].append(((x1,y1), (x2,y2)))
else:
raise DroneVisionError('find_line_limits_unexpected_angle')
if draw_bounded_lines:
color = (0,255,255)
line_thick = 2
if draw_arrowed_bounded_lines:
tipLength = 0.05
cv2.arrowedLine(frame, (x1,y1), (x2,y2), color, line_thick, tipLength=tipLength)
cv2.arrowedLine(frame, (x2,y2), (x1,y1), color, line_thick, tipLength=tipLength)
else:
cv2.line(frame, (x1,y1), (x2,y2), color, line_thick)
try:
max_min_lines = [max_hor_line, min_hor_line, max_vert_line, min_vert_line]
except:
raise DroneVisionError('find_line_limits_no_hor_or_vert_found')
if draw_max_min_lines and len(hough_lines) > 0:
cv2.line(frame, max_hor_line[0], max_hor_line[1], (255,0,255),4)
cv2.line(frame, min_hor_line[0], min_hor_line[1], (255,0,255),4)
cv2.line(frame, max_vert_line[0], max_vert_line[1], (255,0,255),4)
cv2.line(frame, min_vert_line[0], min_vert_line[1], (255,0,255),4)
return frame, bounded_lines, max_min_lines
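# A minimal usage sketch for FindLineLimits, assuming `frame` is a grayscale image and
# `keypoints` come from an OpenCV blob detector; the names below are illustrative only:
#
#   detector = cv2.SimpleBlobDetector_create()
#   keypoints = detector.detect(frame)
#   hough_lines = HoughLinesPointMatrix(frame, keypoints, min_lines=2)
#   frame, bounded_lines, max_min_lines = FindLineLimits(frame, hough_lines, keypoints,
#                                                        draw_bounded_lines=True)
#   max_hor_line, min_hor_line, max_vert_line, min_vert_line = max_min_lines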
def DrawHoughLine(frame, hough_line, color):
'''
@brief Draw hough line on frame.
Hough lines are give in rho, theta coordinates.
@param frame
@param hough_line (rho, theta)
@param color RGB color (R,G,B)
@return frame (with drawn line)
'''
frame = CheckColor(frame)
width, height = GetShape(frame)
size = math.sqrt(math.pow(width, 2.0) + math.pow(height, 2.0))
rho, theta = hough_line
if not(np.isnan(theta)) and not(np.isnan(rho)):
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + size*(-b))
y1 = int(y0 + size*(a))
x2 = int(x0 - size*(-b))
y2 = int(y0 - size*(a))
cv2.line(frame, (x1,y1), (x2,y2), color, 2)
return frame
def DrawHoughLines(frame, hough_lines, color=(255,0,0)):
'''
@brief Draw hough lines on frame.
Hough lines are give in rho, theta coordinates.
@param frame
@param hough_lines List of hough lines (rho, theta)
@param color RGB color (R,G,B)
@return frame (with drawn lines)
'''
for hough_line in hough_lines:
frame = DrawHoughLine(frame, hough_line, color)
return frame
def PrintHoughAccumulator(accumulator, id_m, rhos):
'''
@brief Print Hough transform accumulator in matplotlib figure.
@param accumulator
'''
acc_shape = GetShape(accumulator)
ylabs = np.deg2rad(np.arange(-0.0, 180.0, 1))
acc_map = np.zeros((acc_shape[0], len(ylabs)), dtype=np.uint16)
id_acc = np.argwhere(accumulator > 0)
for i in range(len(id_acc)):
idx = id_acc[i][0]*acc_shape[1] + id_acc[i][1]
rho = rhos[idx / acc_shape[1]]
acc_map[int(rho), idx % acc_shape[1]] += 1
acc_map = cv2.cvtColor(acc_map, cv2.COLOR_GRAY2BGR)
for i in range(len(id_m)):
idx = id_m[i][0]*acc_shape[1] + id_m[i][1]
rho = rhos[idx / acc_shape[1]]
cv2.circle(acc_map,(int(rho), idx % acc_shape[1]), 1, (0,0,255), -1)
acc_map = CheckGrayScale(acc_map)
touple_frame_plot = []
touple_frame_plot.append(('Hough line accumulator', acc_map.T))
MatplotShow(touple_frame_plot, 'Hough_line_accumulator')
def concatenateLines(lines, change_rho):
'''
@brief Concatene hough lines which are within a distance.
@returns concatenated lines. (sorted from min -> max theta)
'''
theta_dict = {}
for rho, theta in lines:
if not(theta in theta_dict):
theta_dict[theta] = []
theta_dict[theta].append(rho)
con_lines = []
for theta in theta_dict:
theta_dict[theta].sort()
j = 0
for i in range(1, len(theta_dict[theta])):
if abs(theta_dict[theta][i] - theta_dict[theta][i-1]) > change_rho:
con_lines.append((np.median(theta_dict[theta][j:i]), theta))
j = i
con_lines.append((np.median(theta_dict[theta][j:]), theta))
return con_lines
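# Worked example of concatenateLines (a sketch, not executed here): with
# lines = [(10.0, 0.0), (12.0, 0.0), (40.0, 0.0)] and change_rho = 5, the first two
# rho values are merged to their median, giving [(11.0, 0.0), (40.0, 0.0)].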
def HoughLineEdgePoints(frame, edge_points, horizontal_points, hough_peak_param=1.2, dilation_kernel_size=3, dilation_iterations=1):
'''
	@brief Speeded-up Hough line transform that iterates over known key point positions.
Inspired by: https://alyssaq.github.io/2014/understanding-hough-transform/
@param frame
@param edge_points List of detected edge points as [[x], [y]]
@param horizontal_points Set True for detecting horizontal lines, and False for detecting vertical lines.
The lines will be focused around the vertical or horizontal axis.
@param hough_peak_param Delimiter for increasing number of peaks to validate for finding the most significant peaks. Must be >= 1.0.
@param dilation_kernel_size (Increase strong areas of peak points by dilation.
Set the kernel size for dilation as a dilation_kernel_size*dilation_kernel_size (f.eks 3*3 kernel size).
Set to 1 to give the dilation no effect. (default=3))
@param dilation_iterations (Number of dilation iterations to increase the width of strong areas (default=1))
@return (rho, theta) (Distance and angle of the most significant edge.)
'''
# Rho and Theta ranges
if horizontal_points:
start_degree = 0.0
end_degree = 45.0
step_degree = 1.0
thetas_small = np.deg2rad(np.arange(start_degree, end_degree, step_degree))
start_degree = 135.0
end_degree = 180.0
step_degree = 1.0
thetas_big = np.deg2rad(np.arange(start_degree, end_degree, step_degree))
thetas = np.concatenate((thetas_small, thetas_big))
else: # vertical points
start_degree = 45.0
end_degree = 135.0
step_degree = 1.0
thetas = np.deg2rad(np.arange(start_degree, end_degree, step_degree))
width, height = GetShape(frame)
diag_len = np.ceil(np.sqrt(width * width + height * height)) # max_dist
rhos = np.linspace(-np.int(diag_len), np.int(diag_len), np.int(diag_len) * 2)
# Cache some reusable values
cos_t = np.cos(thetas)
sin_t = np.sin(thetas)
num_thetas = len(thetas)
# Hough accumulator array of theta vs rho
accumulator = np.zeros((int(2 * diag_len), num_thetas), dtype=np.float32)
acc_shape = GetShape(accumulator)
# Vote in the hough accumulator
for i in range(len(edge_points[0])):
x = edge_points[0][i]
y = edge_points[1][i]
for t_idx in range(num_thetas):
# Calculate rho. diag_len is added for a positive index
rho = int(round(x * cos_t[t_idx] + y * sin_t[t_idx]) + diag_len)
accumulator[rho, t_idx] += 1
# Dilate the accumulator so that close neighboring voting points are stronger, and add the Gaussian smoothed accumulator to highlight the strongest peak in the strongest areas.
accumulator = cv2.dilate(accumulator, np.ones((dilation_kernel_size,dilation_kernel_size), dtype=np.uint8), iterations=dilation_iterations)
accumulator += cv2.GaussianBlur(accumulator, (dilation_kernel_size,dilation_kernel_size), 0) # Let sigma be calculated according to the kernel size. See cv2 doc.
# Peak finding based on max votes.
id_m = np.argwhere(accumulator >= np.max(accumulator)/hough_peak_param)
len_id_m = len(id_m)
rhos_res = [0]*len_id_m
thetas_res = [0]*len_id_m
for i in range(len_id_m):
idx = id_m[i][0]*acc_shape[1] + id_m[i][1]
rhos_res[i] = rhos[idx / acc_shape[1]]
thetas_res[i] = thetas[idx % acc_shape[1]]
rho = np.median(rhos_res)
theta = np.median(thetas_res)
return (rho, theta)
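# A minimal usage sketch for HoughLineEdgePoints, assuming `edge_points` holds the
# x and y pixel coordinates of detected edge points (illustrative values only):
#
#   edge_points = [[10, 11, 12, 13], [50, 50, 51, 50]]   # [[x], [y]]
#   rho, theta = HoughLineEdgePoints(frame, edge_points, horizontal_points=True)
#   frame = DrawHoughLine(frame, (rho, theta), color=(0, 255, 0))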
def HoughLinesPointMatrix(frame, keypoints, min_lines=2, radi_threshold=None, radi_threshold_tuning_param=2.0):
'''
	@brief Speeded-up Hough line transform that iterates over known key point positions.
Inspired by: https://alyssaq.github.io/2014/understanding-hough-transform/
@param frame
@param keypoints List of detected points (using the blob detection algorithm.)
@param min_lines Minimum lines to finally end up with. Set to -1 to hinder any concateniation of detected lines.
@param radi_threshold Threshold in pixels to search for in vertical and horizontal axis (If none, then it is set to the biggest blob size)
	@param radi_threshold_tuning_param Threshold tuning parameter (default=2 when using biggest blob size. < 0.5 is recommended when using the distance between blobs).
	@return hough_lines Returned as a list of tuples (rho, theta)
'''
# Rho and Theta ranges
thetas = np.deg2rad(np.array([0.0, 90.0]))
width, height = GetShape(frame)
diag_len = np.ceil(np.sqrt(width * width + height * height)) # max_dist
rhos = np.linspace(-np.int(diag_len), np.int(diag_len), np.int(diag_len) * 2)
	# Cache some reusable values
cos_t = np.cos(thetas)
sin_t = np.sin(thetas)
num_thetas = len(thetas)
# Hough accumulator array of theta vs rho
accumulator = np.zeros((int(2 * diag_len), num_thetas), dtype=np.float32)
acc_shape = GetShape(accumulator)
biggest_point = 0
# Vote in the hough accumulator
for i in range(len(keypoints)):
x = keypoints[i].pt[0]
y = keypoints[i].pt[1]
if keypoints[i].size > biggest_point:
biggest_point = keypoints[i].size
for t_idx in range(num_thetas):
# Calculate rho. diag_len is added for a positive index
rho = int(round(x * cos_t[t_idx] + y * sin_t[t_idx]) + diag_len)
accumulator[rho, t_idx] += 1
# Peak finding based on max votes
id_m = np.argwhere(accumulator >= 1)
len_id_m = len(id_m)
hough_lines = [0]*len_id_m
#PrintHoughAccumulator(accumulator, id_m, rhos)
for i in range(len_id_m):
idx = id_m[i][0]*acc_shape[1] + id_m[i][1]
rho = rhos[idx / acc_shape[1]]
theta = thetas[idx % acc_shape[1]]
hough_lines[i] = (rho, theta)
if radi_threshold != None:
origin_threshold = radi_threshold*radi_threshold_tuning_param
else:
origin_threshold = biggest_point
threshold = origin_threshold
n_lines = len(hough_lines) + 1 #Stop concatenating when there is no change in n_lines
while len(hough_lines) > min_lines and n_lines - len(hough_lines) > 0 and min_lines > 0:
n_lines = len(hough_lines)
hough_lines = concatenateLines(hough_lines, threshold)
threshold += origin_threshold/2
	return hough_lines # returned as tuples of (rho, theta) | mit |
openhumanoids/exotica | exotica_python/src/pyexotica/tools.py | 2 | 4212 | from __future__ import print_function, division
import numpy as np
from time import time
import matplotlib.pyplot as plt
from collections import OrderedDict
__all__ = [
"check_trajectory_continuous_time",
"check_whether_trajectory_is_collision_free_by_subsampling",
"get_colliding_links",
"plot_task_cost_over_time",
]
def check_trajectory_continuous_time(scene, trajectory):
start_time = time()
all_good = True
robot_links = scene.get_collision_robot_links()
world_links = scene.get_collision_world_links()
for t in range(1, trajectory.shape[0]):
q_t_1 = trajectory[t - 1, :]
q_t_2 = trajectory[t, :]
for r_l in robot_links:
for w_l in world_links:
scene.update(q_t_1)
T_r_l_1 = scene.fk(r_l)
T_w_l_1 = scene.fk(w_l)
scene.update(q_t_2)
T_r_l_2 = scene.fk(r_l)
T_w_l_2 = scene.fk(w_l)
p = scene.get_collision_scene().continuous_collision_check(
r_l, T_r_l_1, T_r_l_2, w_l, T_w_l_1, T_w_l_2
)
if p.in_collision:
print(t, p)
all_good = False
end_time = time()
print("Continuous-time collision verification took", end_time - start_time)
return all_good
def check_whether_trajectory_is_collision_free_by_subsampling(
scene, trajectory, num_subsamples=10, debug=False
):
"""
num_subsamples specifies how many steps are checked between two configurations. Returns True if trajectory is collision-free, and False otherwise.
TODO: Support setting time for Scene update.
"""
start_time = time()
trajectory_length = trajectory.shape[0]
for t in range(1, trajectory_length):
q_t_1 = trajectory[t - 1, :]
q_t_2 = trajectory[t, :]
q_t_interpolation = np.linspace(q_t_1, q_t_2, num_subsamples)
for i in range(num_subsamples):
scene.update(q_t_interpolation[i, :])
if not scene.is_state_valid(True):
return False
end_time = time()
if debug:
print("Trajectory transition collision check took", end_time - start_time)
return True
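# A minimal usage sketch, assuming `scene` is a pyexotica planning Scene and
# `trajectory` is a (T x n_joints) numpy array of configurations:
#
#   if not check_whether_trajectory_is_collision_free_by_subsampling(scene, trajectory,
#                                                                    num_subsamples=20):
#       print("Trajectory is in collision")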
def get_colliding_links(
scene, margin=0.0, safe_distance=0.0, check_self_collision=True, debug=False
):
robotLinks = scene.get_collision_robot_links()
world_links = scene.get_collision_world_links()
collisions = []
for r_l in robotLinks:
for w_l in world_links:
if scene.is_allowed_to_collide(r_l, w_l, True):
if not scene.is_collision_free(r_l, w_l, margin):
d = scene.get_collision_distance(r_l, w_l)
if abs(d[0].distance) > safe_distance:
collisions.append((r_l, w_l, d[0].distance))
if debug:
print(r_l, "-", w_l, "d=", d[0].distance)
if check_self_collision:
for w_l in robotLinks:
if w_l != r_l:
if scene.is_allowed_to_collide(r_l, w_l, True):
if not scene.is_collision_free(r_l, w_l, margin):
d = scene.get_collision_distance(r_l, w_l)
if abs(d[0].distance) > safe_distance:
collisions.append((r_l, w_l, d[0].distance))
if debug:
print(r_l, "-", w_l, d[0].distance)
return collisions
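# A minimal usage sketch, assuming `scene` is a pyexotica planning Scene that has
# already been updated to the configuration of interest:
#
#   for robot_link, world_link, distance in get_colliding_links(scene, margin=0.01):
#       print(robot_link, world_link, distance)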
def plot_task_cost_over_time(problem):
"""
Plots the task cost (task maps) over time given a problem.
"""
costs = OrderedDict()
for task_name in problem.cost.task_maps:
costs[task_name] = np.zeros((problem.T,))
for t in range(problem.T):
ydiff = problem.cost.get_task_error(task_name, t)
S = problem.cost.get_S(task_name, t)
cost = np.dot(np.dot(ydiff, S), ydiff.T)
costs[task_name][t] = cost
fig = plt.figure()
for task_name in costs:
plt.plot(costs[task_name], label=task_name)
plt.title("Task cost over time")
plt.legend()
plt.tight_layout()
plt.show()
| bsd-3-clause |
AllenDowney/ThinkStats2 | code/analytic.py | 1 | 6275 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import math
import numpy as np
import pandas
import nsfg
import thinkplot
import thinkstats2
def ParetoMedian(xmin, alpha):
"""Computes the median of a Pareto distribution."""
return xmin * pow(2, 1/alpha)
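# Worked example (sketch): with xmin=0.5 and alpha=1.0 the median is
# 0.5 * 2**(1/1.0) = 1.0; halving alpha to 0.5 raises it to 0.5 * 2**2 = 2.0.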
def MakeExpoCdf():
"""Generates a plot of the exponential CDF."""
thinkplot.PrePlot(3)
for lam in [2.0, 1, 0.5]:
xs, ps = thinkstats2.RenderExpoCdf(lam, 0, 3.0, 50)
label = r'$\lambda=%g$' % lam
thinkplot.Plot(xs, ps, label=label)
thinkplot.Save(root='analytic_expo_cdf',
title='Exponential CDF',
xlabel='x',
ylabel='CDF')
def ReadBabyBoom(filename='babyboom.dat'):
"""Reads the babyboom data.
filename: string
returns: DataFrame
"""
var_info = [
('time', 1, 8, int),
('sex', 9, 16, int),
('weight_g', 17, 24, int),
('minutes', 25, 32, int),
]
columns = ['name', 'start', 'end', 'type']
variables = pandas.DataFrame(var_info, columns=columns)
variables.end += 1
dct = thinkstats2.FixedWidthVariables(variables, index_base=1)
df = dct.ReadFixedWidth(filename, skiprows=59)
return df
def MakeBabyBoom():
"""Plot CDF of interarrival time on log and linear scales.
"""
# compute the interarrival times
df = ReadBabyBoom()
diffs = df.minutes.diff()
cdf = thinkstats2.Cdf(diffs, label='actual')
thinkplot.PrePlot(cols=2)
thinkplot.Cdf(cdf)
thinkplot.Config(xlabel='minutes',
ylabel='CDF',
legend=False)
thinkplot.SubPlot(2)
thinkplot.Cdf(cdf, complement=True)
thinkplot.Config(xlabel='minutes',
ylabel='CCDF',
yscale='log',
legend=False)
thinkplot.Save(root='analytic_interarrivals',
legend=False)
def MakeParetoCdf():
"""Generates a plot of the Pareto CDF."""
xmin = 0.5
thinkplot.PrePlot(3)
for alpha in [2.0, 1.0, 0.5]:
xs, ps = thinkstats2.RenderParetoCdf(xmin, alpha, 0, 10.0, n=100)
thinkplot.Plot(xs, ps, label=r'$\alpha=%g$' % alpha)
thinkplot.Save(root='analytic_pareto_cdf',
title='Pareto CDF',
xlabel='x',
ylabel='CDF')
def MakeParetoCdf2():
"""Generates a plot of the CDF of height in Pareto World."""
xmin = 100
alpha = 1.7
xs, ps = thinkstats2.RenderParetoCdf(xmin, alpha, 0, 1000.0, n=100)
thinkplot.Plot(xs, ps)
thinkplot.Save(root='analytic_pareto_height',
title='Pareto CDF',
xlabel='height (cm)',
ylabel='CDF',
legend=False)
def MakeNormalCdf():
"""Generates a plot of the normal CDF."""
thinkplot.PrePlot(3)
mus = [1.0, 2.0, 3.0]
sigmas = [0.5, 0.4, 0.3]
for mu, sigma in zip(mus, sigmas):
xs, ps = thinkstats2.RenderNormalCdf(mu=mu, sigma=sigma,
low=-1.0, high=4.0)
label = r'$\mu=%g$, $\sigma=%g$' % (mu, sigma)
thinkplot.Plot(xs, ps, label=label)
thinkplot.Save(root='analytic_normal_cdf',
title='Normal CDF',
xlabel='x',
ylabel='CDF',
loc=2)
def MakeNormalModel(weights):
"""Plot the CDF of birthweights with a normal model."""
# estimate parameters: trimming outliers yields a better fit
mu, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
print('Mean, Var', mu, var)
# plot the model
sigma = math.sqrt(var)
print('Sigma', sigma)
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=12.5)
thinkplot.Plot(xs, ps, label='model', color='0.8')
# plot the data
cdf = thinkstats2.Cdf(weights, label='data')
thinkplot.PrePlot(1)
thinkplot.Cdf(cdf)
thinkplot.Save(root='analytic_birthwgt_model',
title='Birth weights',
xlabel='birth weight (lbs)',
ylabel='CDF')
def MakeExampleNormalPlot():
"""Generates a sample normal probability plot.
"""
n = 1000
thinkplot.PrePlot(3)
mus = [0, 1, 5]
sigmas = [1, 1, 2]
for mu, sigma in zip(mus, sigmas):
sample = np.random.normal(mu, sigma, n)
xs, ys = thinkstats2.NormalProbability(sample)
label = '$\mu=%d$, $\sigma=%d$' % (mu, sigma)
thinkplot.Plot(xs, ys, label=label)
thinkplot.Save(root='analytic_normal_prob_example',
title='Normal probability plot',
xlabel='standard normal sample',
ylabel='sample values')
def MakeNormalPlot(weights, term_weights):
"""Generates a normal probability plot of birth weights."""
mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
std = math.sqrt(var)
xs = [-4, 4]
fxs, fys = thinkstats2.FitLine(xs, mean, std)
thinkplot.Plot(fxs, fys, linewidth=4, color='0.8')
thinkplot.PrePlot(2)
xs, ys = thinkstats2.NormalProbability(weights)
thinkplot.Plot(xs, ys, label='all live')
xs, ys = thinkstats2.NormalProbability(term_weights)
thinkplot.Plot(xs, ys, label='full term')
thinkplot.Save(root='analytic_birthwgt_normal',
title='Normal probability plot',
xlabel='Standard deviations from mean',
ylabel='Birth weight (lbs)')
def main():
thinkstats2.RandomSeed(18)
MakeExampleNormalPlot()
# make the analytic CDFs
MakeExpoCdf()
MakeBabyBoom()
MakeParetoCdf()
MakeParetoCdf2()
MakeNormalCdf()
# test the distribution of birth weights for normality
preg = nsfg.ReadFemPreg()
full_term = preg[preg.prglngth >= 37]
weights = preg.totalwgt_lb.dropna()
term_weights = full_term.totalwgt_lb.dropna()
MakeNormalModel(weights)
MakeNormalPlot(weights, term_weights)
if __name__ == "__main__":
main()
| gpl-3.0 |
maxalbert/bokeh | examples/interactions/interactive_bubble/gapminder.py | 8 | 4161 | import pandas as pd
from jinja2 import Template
from bokeh.browserlib import view
from bokeh.models import (
ColumnDataSource, Plot, Circle, Range1d,
LinearAxis, HoverTool, Text,
SingleIntervalTicker, CustomJS, Slider
)
from bokeh.palettes import Spectral6
from bokeh.plotting import vplot
from bokeh.resources import JSResources
from bokeh.embed import file_html
from data import process_data
fertility_df, life_expectancy_df, population_df_size, regions_df, years, regions = process_data()
sources = {}
region_color = regions_df['region_color']
region_color.name = 'region_color'
for year in years:
fertility = fertility_df[year]
fertility.name = 'fertility'
life = life_expectancy_df[year]
life.name = 'life'
population = population_df_size[year]
population.name = 'population'
new_df = pd.concat([fertility, life, population, region_color], axis=1)
sources['_' + str(year)] = ColumnDataSource(new_df)
dictionary_of_sources = dict(zip([x for x in years], ['_%s' % x for x in years]))
js_source_array = str(dictionary_of_sources).replace("'", "")
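# js_source_array is the string form of that mapping with the quotes stripped,
# e.g. "{1964: _1964, 1965: _1965, ...}", so the CustomJS callback defined below can
# look up the ColumnDataSource for the selected year on the JavaScript side.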
xdr = Range1d(1, 9)
ydr = Range1d(20, 100)
plot = Plot(
x_range=xdr,
y_range=ydr,
title="",
plot_width=800,
plot_height=400,
outline_line_color=None,
toolbar_location=None,
)
AXIS_FORMATS = dict(
minor_tick_in=None,
minor_tick_out=None,
major_tick_in=None,
major_label_text_font_size="10pt",
major_label_text_font_style="normal",
axis_label_text_font_size="10pt",
axis_line_color='#AAAAAA',
major_tick_line_color='#AAAAAA',
major_label_text_color='#666666',
major_tick_line_cap="round",
axis_line_cap="round",
axis_line_width=1,
major_tick_line_width=1,
)
xaxis = LinearAxis(SingleIntervalTicker(interval=1), axis_label="Children per woman (total fertility)", **AXIS_FORMATS)
yaxis = LinearAxis(SingleIntervalTicker(interval=20), axis_label="Life expectancy at birth (years)", **AXIS_FORMATS)
plot.add_layout(xaxis, 'below')
plot.add_layout(yaxis, 'left')
# ### Add the background year text
# We add this first so it is below all the other glyphs
text_source = ColumnDataSource({'year': ['%s' % years[0]]})
text = Text(x=2, y=35, text='year', text_font_size='150pt', text_color='#EEEEEE')
plot.add_glyph(text_source, text)
# Add the circle
renderer_source = sources['_%s' % years[0]]
circle_glyph = Circle(
x='fertility', y='life', size='population',
fill_color='region_color', fill_alpha=0.8,
line_color='#7c7e71', line_width=0.5, line_alpha=0.5)
circle_renderer = plot.add_glyph(renderer_source, circle_glyph)
# Add the hover (only against the circle and not other plot elements)
tooltips = "@index"
plot.add_tools(HoverTool(tooltips=tooltips, renderers=[circle_renderer]))
# Add the legend
text_x = 7
text_y = 95
for i, region in enumerate(regions):
plot.add_glyph(Text(x=text_x, y=text_y, text=[region], text_font_size='10pt', text_color='#666666'))
plot.add_glyph(Circle(x=text_x - 0.1, y=text_y + 2, fill_color=Spectral6[i], size=10, line_color=None, fill_alpha=0.8))
text_y = text_y - 5
# Add the slider
code = """
var year = slider.get('value'),
sources = %s,
new_source_data = sources[year].get('data');
renderer_source.set('data', new_source_data);
text_source.set('data', {'year': [String(year)]});
""" % js_source_array
callback = CustomJS(args=sources, code=code)
slider = Slider(start=years[0], end=years[-1], value=1, step=1, title="Year", callback=callback, name='testy')
callback.args["renderer_source"] = renderer_source
callback.args["slider"] = slider
callback.args["text_source"] = text_source
# Stick the plot and the slider together
layout = vplot(plot, slider)
# Open our custom template
with open('gapminder_template.jinja', 'r') as f:
template = Template(f.read())
# Use inline resources, render the html and open
js_resources = JSResources(mode='inline')
title = "Bokeh - Gapminder Bubble Plot"
html = file_html(layout, None, title, template=template, js_resources=js_resources)
output_file = 'gapminder.html'
with open(output_file, 'w') as f:
f.write(html)
view(output_file)
| bsd-3-clause |
UPDDI/mps-database-server | clustergrammer/make_unique_labels.py | 2 | 1934 | import pandas as pd
def main(net, df=None):
'''
Run in load_data module (which runs when file is loaded or dataframe is loaded),
check for duplicate row/col names, and add index to names if necesary
'''
if df is None:
df = net.export_df()
# rows
#############
rows = df.index.tolist()
if type(rows[0]) is str:
if len(rows) != len(list(set(rows))):
new_rows = add_index_list(rows)
df.index = new_rows
elif type(rows[0]) is tuple:
row_names = []
for inst_row in rows:
row_names.append(inst_row[0])
if len(row_names) != len(list(set(row_names))):
row_names = add_index_list(row_names)
# add back to tuple
new_rows = []
for inst_index in range(len(rows)):
inst_row = rows[inst_index]
new_row = list(inst_row)
new_row[0] = row_names[inst_index]
new_row = tuple(new_row)
new_rows.append(new_row)
df.index = new_rows
# cols
#############
cols = df.columns.tolist()
if type(cols[0]) is str:
# list column names
if len(cols) != len(list(set(cols))):
new_cols = add_index_list(cols)
df.columns = new_cols
elif type(cols[0]) is tuple:
col_names = []
for inst_col in cols:
col_names.append(inst_col[0])
if len(col_names) != len(list(set(col_names))):
col_names = add_index_list(col_names)
# add back to tuple
new_cols = []
for inst_index in range(len(cols)):
inst_col = cols[inst_index]
new_col = list(inst_col)
new_col[0] = col_names[inst_index]
new_col = tuple(new_col)
new_cols.append(new_col)
df.columns = new_cols
# return dataframe with unique names
return df
def add_index_list(nodes):
new_nodes = []
for i in range(len(nodes)):
index = i + 1
inst_node = nodes[i]
new_node = inst_node + '-' + str(index)
new_nodes.append(new_node)
return new_nodes
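# Worked example (sketch): add_index_list(['gene', 'cell', 'gene']) returns
# ['gene-1', 'cell-2', 'gene-3'], which is how duplicate labels are disambiguated above.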
| mit |
annoviko/pyclustering | pyclustering/utils/tests/integration/it_metric.py | 1 | 4645 | """!
@brief Integration-tests for metrics.
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
import unittest
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
import numpy
from pyclustering.core.metric_wrapper import metric_wrapper
from pyclustering.utils.metric import type_metric, distance_metric
class MetricUnitTest(unittest.TestCase):
def testEuclideanMetric(self):
metric_instance = metric_wrapper(type_metric.EUCLIDEAN, [], None)
self.assertEqual(2.0, metric_instance([0.0, 0.0], [2.0, 0.0]))
def testSquareEuclideanMetric(self):
metric_instance = metric_wrapper(type_metric.EUCLIDEAN_SQUARE, [], None)
self.assertEqual(4.0, metric_instance([0.0, 0.0], [2.0, 0.0]))
def testManhattanMetric(self):
metric_instance = metric_wrapper(type_metric.MANHATTAN, [], None)
self.assertEqual(3.0, metric_instance([1.0, 2.0], [0.0, 0.0]))
def testChebyshevMetric(self):
metric_instance = metric_wrapper(type_metric.CHEBYSHEV, [], None)
self.assertEqual(4.0, metric_instance([1.0, 4.0], [0.0, 0.0]))
def testMinkowskiMetric(self):
metric_instance = metric_wrapper(type_metric.MINKOWSKI, [2.0], None)
self.assertEqual(2.0, metric_instance([0.0, 0.0], [2.0, 0.0]))
def testCanberraMetric(self):
metric_instance = metric_wrapper(type_metric.CANBERRA, [], None)
self.assertEqual(0.0, metric_instance([0.0, 0.0], [0.0, 0.0]))
self.assertEqual(2.0, metric_instance([0.0, 0.0], [1.0, 1.0]))
self.assertEqual(1.0, metric_instance([0.75, 0.75], [0.25, 0.25]))
self.assertEqual(0.0, metric_instance([-1.0, -1.0], [-1.0, -1.0]))
self.assertEqual(0.4, metric_instance([-2.0, -2.0], [-3.0, -3.0]))
def testChiSquareMetric(self):
metric_instance = metric_wrapper(type_metric.CHI_SQUARE, [], None)
self.assertEqual(0.0, metric_instance([0.0, 0.0], [0.0, 0.0]))
self.assertEqual(2.0, metric_instance([0.0, 0.0], [1.0, 1.0]))
self.assertEqual(0.5, metric_instance([0.75, 0.75], [0.25, 0.25]))
self.assertEqual(0.0, metric_instance([-1.0, -1.0], [-1.0, -1.0]))
self.assertEqual(0.4, metric_instance([-2.0, -2.0], [-3.0, -3.0]))
def testGowerDistance(self):
metric_instance = metric_wrapper(type_metric.GOWER, [0.0], None)
self.assertEqual(0.0, metric_instance([0.0], [0.0]))
metric_instance = metric_wrapper(type_metric.GOWER, [1.0, 1.0], None)
self.assertEqual(1.0, metric_instance([0.0, 0.0], [1.0, 1.0]))
metric_instance = metric_wrapper(type_metric.GOWER, [0.5, 0.5], None)
self.assertEqual(1.0, metric_instance([0.75, 0.75], [0.25, 0.25]))
metric_instance = metric_wrapper(type_metric.GOWER, [0.0, 0.0], None)
self.assertEqual(0.0, metric_instance([-1.0, -1.0], [-1.0, -1.0]))
metric_instance = metric_wrapper(type_metric.GOWER, [1.0, 1.0], None)
self.assertEqual(1.0, metric_instance([-2.0, -2.0], [-3.0, -3.0]))
def testBuildGowerDistanceFromMetricWithMaxRange(self):
metric = distance_metric(type_metric.GOWER, max_range=[2.0, 0.0])
ccore_metric = metric_wrapper.create_instance(metric)
self.assertEqual(0.5, ccore_metric([-3.0, -3.0], [-5.0, -3.0]))
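    # Worked check (sketch): with max_range = [2.0, 0.0] the Gower distance here is
    # (|-3 - (-5)| / 2.0 + 0) / 2 = 0.5, matching the assertion above; dimensions
    # with a zero range contribute nothing.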
def testBuildGowerDistanceFromMetricWithNumpyMaxRange(self):
metric = distance_metric(type_metric.GOWER, max_range=numpy.array([2.0, 0.0]))
ccore_metric = metric_wrapper.create_instance(metric)
self.assertEqual(0.5, ccore_metric([-3.0, -3.0], [-5.0, -3.0]))
def testBuildGowerDistanceFromMetricWithData(self):
metric = distance_metric(type_metric.GOWER, data=[[-3.0, -3.0], [-4.0, -3.0], [-4.5, -3.0], [-5.0, -3.0]])
ccore_metric = metric_wrapper.create_instance(metric)
self.assertEqual(0.5, ccore_metric([-3.0, -3.0], [-5.0, -3.0]))
def testBuildGowerDistanceFromMetricWithNumpyData(self):
metric = distance_metric(type_metric.GOWER, data=numpy.array([[-3.0, -3.0], [-4.0, -3.0], [-4.5, -3.0], [-5.0, -3.0]]))
ccore_metric = metric_wrapper.create_instance(metric)
self.assertEqual(0.5, ccore_metric([-3.0, -3.0], [-5.0, -3.0]))
# TODO: doesn't work for some platforms.
#def testUserDefinedMetric(self):
# user_metric = lambda p1, p2 : p1[0] + p2[0];
# metric_instance = metric_wrapper(type_metric.USER_DEFINED, [], user_metric);
# assertion.eq(2.0, metric_instance([0.0, 0.0], [2.0, 0.0]));
# assertion.eq(4.0, metric_instance([3.0, 2.0], [1.0, 5.0])); | gpl-3.0 |
astroML/astroML | examples/datasets/plot_great_wall.py | 2 | 1071 | """
SDSS "Great Wall"
-----------------
Plotting the SDSS "great wall", a filament of galaxies visible by eye in the
projected locations of the SDSS spectroscopic galaxy sample.
This follows a procedure similar to that of [1]_.
References
----------
.. [1] http://adsabs.harvard.edu/abs/2008ApJ...674L..13C
"""
# Author: Jake VanderPlas <[email protected]>
# License: BSD
# The figure is an example from astroML: see http://astroML.github.com
import numpy as np
from matplotlib import pyplot as plt
from astroML.datasets import fetch_great_wall
from astroML.density_estimation import KNeighborsDensity
#------------------------------------------------------------
# Fetch the great wall data
X = fetch_great_wall()
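# X is expected to be an (N, 2) array of projected (x, y) galaxy positions in Mpc,
# which is what the scatter plot below assumes.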
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(8, 4))
# First plot: scatter the points
ax = plt.subplot(111, aspect='equal')
ax.scatter(X[:, 1], X[:, 0], s=1, lw=0, c='k')
ax.set_xlim(-300, 200)
ax.set_ylim(-375, -175)
ax.set_xlabel('y (Mpc)')
ax.set_ylabel('x (Mpc)')
plt.show()
| bsd-2-clause |
clancyian/my_vimrc | ipython_config.py | 2 | 22105 | # Configuration file for ipython.
#------------------------------------------------------------------------------
# InteractiveShellApp(Configurable) configuration
#------------------------------------------------------------------------------
## A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
## Execute the given command string.
#c.InteractiveShellApp.code_to_run = ''
## Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
#c.InteractiveShellApp.exec_PYTHONSTARTUP = True
## List of files to run at IPython startup.
#c.InteractiveShellApp.exec_files = []
## lines of code to run at IPython startup.
#c.InteractiveShellApp.exec_lines = []
## A list of dotted module names of IPython extensions to load.
#c.InteractiveShellApp.extensions = []
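# For example, to load the bundled autoreload extension on startup (uncomment to use):
# c.InteractiveShellApp.extensions = ['autoreload']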
## dotted module name of an IPython extension to load.
#c.InteractiveShellApp.extra_extension = ''
## A file to be run
#c.InteractiveShellApp.file_to_run = ''
## Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk2', 'gtk3',
# 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2', 'qt4').
#c.InteractiveShellApp.gui = None
## Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
#c.InteractiveShellApp.hide_initial_ns = True
## Configure matplotlib for interactive use with the default matplotlib backend.
#c.InteractiveShellApp.matplotlib = None
## Run the module as a script.
#c.InteractiveShellApp.module_to_run = ''
## Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
#c.InteractiveShellApp.pylab = None
## If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
#c.InteractiveShellApp.pylab_import_all = True
## Reraise exceptions encountered loading IPython extensions?
#c.InteractiveShellApp.reraise_ipython_extension_failures = False
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# BaseIPythonApplication(Application) configuration
#------------------------------------------------------------------------------
## IPython: an enhanced interactive Python shell.
## Whether to create profile dir if it doesn't exist
#c.BaseIPythonApplication.auto_create = False
## Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
#c.BaseIPythonApplication.copy_config_files = False
## Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
#c.BaseIPythonApplication.extra_config_file = u''
## The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
#c.BaseIPythonApplication.ipython_dir = u''
## Whether to overwrite existing config files when copying
#c.BaseIPythonApplication.overwrite = False
## The IPython profile to use.
#c.BaseIPythonApplication.profile = u'default'
## Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
#c.BaseIPythonApplication.verbose_crash = False
#------------------------------------------------------------------------------
# TerminalIPythonApp(BaseIPythonApplication,InteractiveShellApp) configuration
#------------------------------------------------------------------------------
## Whether to display a banner upon starting IPython.
#c.TerminalIPythonApp.display_banner = True
## If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
#c.TerminalIPythonApp.force_interact = False
## Start IPython quickly by skipping the loading of config files.
#c.TerminalIPythonApp.quick = False
#------------------------------------------------------------------------------
# InteractiveShell(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## An enhanced, interactive shell for Python.
## 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
#c.InteractiveShell.ast_node_interactivity = 'last_expr'
## A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
#c.InteractiveShell.ast_transformers = []
## Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
#c.InteractiveShell.autocall = 0
## Autoindent IPython code entered interactively.
#c.InteractiveShell.autoindent = True
## Enable magic commands to be called without the leading %.
#c.InteractiveShell.automagic = True
## The part of the banner to be printed before the profile
#c.InteractiveShell.banner1 = 'Python 2.7.12 |Continuum Analytics, Inc.| (default, Jun 29 2016, 11:08:50) \nType "copyright", "credits" or "license" for more information.\n\nIPython 5.1.0 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
## The part of the banner to be printed after the profile
#c.InteractiveShell.banner2 = ''
## Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
#c.InteractiveShell.cache_size = 1000
## Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
#c.InteractiveShell.color_info = True
## Set the color scheme (NoColor, Neutral, Linux, or LightBG).
#c.InteractiveShell.colors = 'Neutral'
##
#c.InteractiveShell.debug = False
## **Deprecated**
#
# Will be removed in IPython 6.0
#
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). `deep_reload`
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
#c.InteractiveShell.deep_reload = False
## Don't call post-execute functions that have failed in the past.
#c.InteractiveShell.disable_failing_post_execute = False
## If True, anything that would be passed to the pager will be displayed as
# regular output instead.
#c.InteractiveShell.display_page = False
## (Provisional API) enables html representation in mime bundles sent to pagers.
#c.InteractiveShell.enable_html_pager = False
## Total length of command history
#c.InteractiveShell.history_length = 10000
## The number of saved history entries to be loaded into the history buffer at
# startup.
#c.InteractiveShell.history_load_length = 1000
##
#c.InteractiveShell.ipython_dir = ''
## Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
#c.InteractiveShell.logappend = ''
## The name of the logfile to use.
#c.InteractiveShell.logfile = ''
## Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
#c.InteractiveShell.logstart = False
##
#c.InteractiveShell.object_info_string_level = 0
## Automatically call the pdb debugger after every exception.
#c.InteractiveShell.pdb = False
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_in1 = 'In [\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_in2 = ' .\\D.: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_out = 'Out[\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompts_pad_left = True
##
#c.InteractiveShell.quiet = False
##
#c.InteractiveShell.separate_in = '\n'
##
#c.InteractiveShell.separate_out = ''
##
#c.InteractiveShell.separate_out2 = ''
## Show rewritten input, e.g. for autocall.
#c.InteractiveShell.show_rewritten_input = True
## Enables rich html representation of docstrings. (This requires the docrepr
# module).
#c.InteractiveShell.sphinxify_docstring = False
##
#c.InteractiveShell.wildcards_case_sensitive = True
##
#c.InteractiveShell.xmode = 'Context'
#------------------------------------------------------------------------------
# TerminalInteractiveShell(InteractiveShell) configuration
#------------------------------------------------------------------------------
## Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
#c.TerminalInteractiveShell.confirm_exit = True
## Options for displaying tab completions, 'column', 'multicolumn', and
# 'readlinelike'. These options are for `prompt_toolkit`, see `prompt_toolkit`
# documentation for more information.
#c.TerminalInteractiveShell.display_completions = 'multicolumn'
## Shortcut style to use at the prompt. 'vi' or 'emacs'.
c.TerminalInteractiveShell.editing_mode = 'vi'
## Set the editor used by IPython (default to $EDITOR/vi/notepad).
c.TerminalInteractiveShell.editor = 'vi'
## Highlight matching brackets .
#c.TerminalInteractiveShell.highlight_matching_brackets = True
## The name of a Pygments style to use for syntax highlighting: manni, igor,
# lovelace, xcode, vim, autumn, vs, rrt, native, perldoc, borland, tango, emacs,
# friendly, monokai, paraiso-dark, colorful, murphy, bw, pastie, algol_nu,
# paraiso-light, trac, default, algol, fruity
#c.TerminalInteractiveShell.highlighting_style = 'legacy'
## Override highlighting format for specific tokens
#c.TerminalInteractiveShell.highlighting_style_overrides = {}
## Enable mouse support in the prompt
#c.TerminalInteractiveShell.mouse_support = False
## Class used to generate Prompt token for prompt_toolkit
#c.TerminalInteractiveShell.prompts_class = 'IPython.terminal.prompts.Prompts'
## Use `raw_input` for the REPL, without completion, multiline input, and prompt
# colors.
#
# Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR.
# Known uses are IPython's own testing machinery and emacs inferior-shell
# integration through elpy.
#
# This mode defaults to `True` if the `IPY_TEST_SIMPLE_PROMPT` environment
# variable is set, or the current terminal is not a tty.
#c.TerminalInteractiveShell.simple_prompt = False
## Number of lines at the bottom of the screen to reserve for the completion menu
#c.TerminalInteractiveShell.space_for_menu = 6
## Automatically set the terminal title
#c.TerminalInteractiveShell.term_title = True
## Use 24bit colors instead of 256 colors in prompt highlighting. If your
# terminal supports true color, the following command should print 'TRUECOLOR'
# in orange: printf "\x1b[38;2;255;100;0mTRUECOLOR\x1b[0m\n"
#c.TerminalInteractiveShell.true_color = False
#------------------------------------------------------------------------------
# HistoryAccessor(HistoryAccessorBase) configuration
#------------------------------------------------------------------------------
## Access the history database without adding to it.
#
# This is intended for use by standalone history tools. IPython shells use
# HistoryManager, below, which is a subclass of this.
## Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
#c.HistoryAccessor.connection_options = {}
## enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
#c.HistoryAccessor.enabled = True
## Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
#
# You can also use the special value `:memory:` (including the colons at both
# ends but not the backticks) to avoid creating a history file.
#c.HistoryAccessor.hist_file = u''
#------------------------------------------------------------------------------
# HistoryManager(HistoryAccessor) configuration
#------------------------------------------------------------------------------
## A class to organize all history-related functionality in one place.
## Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
#c.HistoryManager.db_cache_size = 0
## Should the history database include output? (default: no)
#c.HistoryManager.db_log_output = False
#------------------------------------------------------------------------------
# ProfileDir(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
## Set the profile location directly. This overrides the logic used by the
# `profile` option.
#c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# BaseFormatter(Configurable) configuration
#------------------------------------------------------------------------------
## A base formatter class that is configurable.
#
# This formatter should usually be used as the base class of all formatters. It
# is a traited :class:`Configurable` class and includes an extensible API for
# users to determine how their objects are formatted. The following logic is
# used to find a function to format a given object.
#
# 1. The object is introspected to see if it has a method with the name
#    :attr:`print_method`. If it does, that object is passed to that method
#    for formatting.
# 2. If no print method is found, three internal dictionaries are consulted
#    to find a print method: :attr:`singleton_printers`, :attr:`type_printers`
#    and :attr:`deferred_printers`.
#
# Users should use these dictionaries to register functions that will be used to
# compute the format data for their objects (if those objects don't have the
# special print methods). The easiest way of using these dictionaries is through
# the :meth:`for_type` and :meth:`for_type_by_name` methods.
#
# If no function/callable is found to compute the format data, ``None`` is
# returned and this format type is not used.
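## Example (a minimal sketch, not part of the stock config): registering a custom
# plain-text printer with `for_type`. This belongs in a startup file (e.g.
# profile_default/startup/50-formatters.py), where `get_ipython()` is available;
# the `Point` class and its attributes are purely illustrative.
#
#   def point_pprinter(obj, p, cycle):
#       # `p` is the pretty printer; `cycle` is True for self-referential objects.
#       p.text('Point(...)' if cycle else 'Point(x=%r, y=%r)' % (obj.x, obj.y))
#
#   plain = get_ipython().display_formatter.formatters['text/plain']
#   plain.for_type(Point, point_pprinter)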
##
#c.BaseFormatter.deferred_printers = {}
##
#c.BaseFormatter.enabled = True
##
#c.BaseFormatter.singleton_printers = {}
##
#c.BaseFormatter.type_printers = {}
#------------------------------------------------------------------------------
# PlainTextFormatter(BaseFormatter) configuration
#------------------------------------------------------------------------------
## The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
##
#c.PlainTextFormatter.float_precision = ''
## Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
#c.PlainTextFormatter.max_seq_length = 1000
##
#c.PlainTextFormatter.max_width = 79
##
#c.PlainTextFormatter.newline = '\n'
##
#c.PlainTextFormatter.pprint = True
##
#c.PlainTextFormatter.verbose = False
#------------------------------------------------------------------------------
# Completer(Configurable) configuration
#------------------------------------------------------------------------------
## Activate greedy completion (PENDING DEPRECATION). This is now mostly taken
# care of by Jedi.
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
#c.Completer.greedy = False
#------------------------------------------------------------------------------
# IPCompleter(Completer) configuration
#------------------------------------------------------------------------------
## Extension of the completer class with IPython-specific features
## DEPRECATED as of version 5.0.
#
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
#c.IPCompleter.limit_to__all__ = False
## Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
#c.IPCompleter.merge_completions = True
## Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
#c.IPCompleter.omit__names = 2
#------------------------------------------------------------------------------
# ScriptMagics(Magics) configuration
#------------------------------------------------------------------------------
## Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
## Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
#c.ScriptMagics.script_magics = []
## Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
#c.ScriptMagics.script_paths = {}
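## Example (not enabled; the magic name and path are illustrative only): expose a
# `%%julia` cell magic backed by an interpreter that is not on $PATH:
#
#   c.ScriptMagics.script_magics = ['julia']
#   c.ScriptMagics.script_paths = {'julia': '/usr/local/bin/julia'}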
#------------------------------------------------------------------------------
# StoreMagics(Magics) configuration
#------------------------------------------------------------------------------
## Lightweight persistence for python variables.
#
# Provides the %store magic.
## If True, any %store-d variables will be automatically restored when IPython
# starts.
#c.StoreMagics.autorestore = False
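## Example (not enabled): with autorestore on, variables saved via `%store` are
# loaded again automatically at startup.
#
#   c.StoreMagics.autorestore = True
#
# In a session:
#   %store results      # save the variable `results` to the profile database
#   %store -r results   # restore it explicitly when autorestore is off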
| unlicense |
phdowling/scikit-learn | examples/classification/plot_classifier_comparison.py | 181 | 4699 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LDA(),
QDA()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max] x [y_min, y_max].
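        # decision_function returns a signed, unbounded confidence score and
        # predict_proba the probability of the positive class; either one serves
        # as a continuous value for the filled contour plot below.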
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |